Renamed package 'greg' to 'regex'

2025-01-30 09:15:29 -05:00
parent ca8d32cd7f
commit aef8152fc1
13 changed files with 9 additions and 9 deletions

1123
regex/compile.go Normal file

File diff suppressed because it is too large

415
regex/matching.go Normal file

@@ -0,0 +1,415 @@
package regex
import (
"fmt"
"sort"
)
// A Match stores a slice of all the capturing groups in a match.
type Match []Group
// A Group represents a capturing group. It contains the start and end indices of the group's match.
type Group struct {
StartIdx int
EndIdx int
}
func newMatch(size int) Match {
toRet := make([]Group, size)
for i := range toRet {
toRet[i].StartIdx = -1
toRet[i].EndIdx = -1
}
return toRet
}
// Returns the number of valid groups in the match
func (m Match) numValidGroups() int {
numValid := 0
for _, g := range m {
if g.StartIdx >= 0 && g.EndIdx >= 0 {
numValid++
}
}
return numValid
}
// Returns a string containing the indices of all (valid) groups in the match
func (m Match) ToString() string {
var toRet string
for i, g := range m {
if g.isValid() {
toRet += fmt.Sprintf("Group %d\n", i)
toRet += g.toString()
toRet += "\n"
}
}
return toRet
}
// Converts the Group into a string representation.
func (idx Group) toString() string {
return fmt.Sprintf("%d\t%d", idx.StartIdx, idx.EndIdx)
}
// Returns whether a group contains valid indices
func (g Group) isValid() bool {
return g.StartIdx >= 0 && g.EndIdx >= 0
}
// takeZeroState takes the 0-state (if such a transition exists) for all states in the
// given slice. It returns the resulting states. If any of the resulting states is a 0-state,
// the second return value is true.
// If a state begins or ends a capturing group, its 'thread' is updated to contain the correct index.
func takeZeroState(states []*State, numGroups int, idx int) (rtv []*State, isZero bool) {
for _, state := range states {
if len(state.transitions[EPSILON]) > 0 {
for _, s := range state.transitions[EPSILON] {
if s.threadGroups == nil {
s.threadGroups = newMatch(numGroups + 1)
}
copy(s.threadGroups, state.threadGroups)
if s.groupBegin {
s.threadGroups[s.groupNum].StartIdx = idx
// openParenGroups = append(openParenGroups, s.groupNum)
}
if s.groupEnd {
s.threadGroups[s.groupNum].EndIdx = idx
// closeParenGroups = append(closeParenGroups, s.groupNum)
}
}
rtv = append(rtv, state.transitions[EPSILON]...)
}
}
for _, state := range rtv {
if len(state.transitions[EPSILON]) > 0 {
return rtv, true
}
}
return rtv, false
}
// zeroMatchPossible returns true if a zero-length match is possible
// from any of the given states, given the string and our position in it.
// It uses the same algorithm to find zero-states as the one inside the loop,
// so I should probably put it in a function.
func zeroMatchPossible(str []rune, idx int, numGroups int, states ...*State) bool {
zeroStates, isZero := takeZeroState(states, numGroups, idx)
tempstates := make([]*State, 0, len(zeroStates)+len(states))
tempstates = append(tempstates, states...)
tempstates = append(tempstates, zeroStates...)
num_appended := 0 // number of unique states added to tempstates
for isZero == true {
zeroStates, isZero = takeZeroState(tempstates, numGroups, idx)
tempstates, num_appended = unique_append(tempstates, zeroStates...)
if num_appended == 0 { // break if we haven't appended any more unique values
break
}
}
for _, state := range tempstates {
if state.isEmpty && (state.assert == NONE || state.checkAssertion(str, idx)) && state.isLast {
return true
}
}
return false
}
// Prunes the slice by removing overlapping indices.
func pruneIndices(indices []Match) []Match {
// First, sort the slice by the start indices
sort.Slice(indices, func(i, j int) bool {
return indices[i][0].StartIdx < indices[j][0].StartIdx
})
toRet := make([]Match, 0, len(indices))
current := indices[0]
for _, idx := range indices[1:] {
// idx doesn't overlap with current (starts after current ends), so add current to result
// and update the current.
if idx[0].StartIdx >= current[0].EndIdx {
toRet = append(toRet, current)
current = idx
} else if idx[0].EndIdx > current[0].EndIdx {
// idx overlaps, but it is longer, so update current
current = idx
}
}
// Add last state
toRet = append(toRet, current)
return toRet
}
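// For example (an illustration, not part of the original file): given the whole-match
// groups (0,3), (1,5) and (5,9), pruneIndices drops (0,3) in favour of the longer
// overlapping match (1,5), and keeps (5,9) because it starts exactly where (1,5)
// ends - the result is [(1,5), (5,9)].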
// FindString returns a _string_ containing the _text_ of the _leftmost_ match of
// the regex, in the given string. The return value will be an empty string in two situations:
// 1. No match was found
// 2. The match was an empty string
func FindString(regex Reg, str string) string {
match, err := FindNthMatch(regex, str, 1)
if err != nil {
return ""
}
return str[match[0].StartIdx:match[0].EndIdx]
}
// FindAllString is the 'all' version of FindString.
// It returns a _slice of strings_ containing the _text_ of _all_ matches of
// the regex, in the given string.
//func FindAllString(regex Reg, str []string) []string {
//
//}
// FindNthMatch finds the 'n'th match of the regex represented by the given start-state, with
// the given string.
// It returns an error (!= nil) if there are fewer than 'n' matches in the string.
func FindNthMatch(regex Reg, str string, n int) (Match, error) {
idx := 0
matchNum := 0
str_runes := []rune(str)
var matchFound bool
var matchIdx Match
for idx <= len(str_runes) {
matchFound, matchIdx, idx = findAllMatchesHelper(regex.start, str_runes, idx, regex.numGroups)
if matchFound {
matchNum++
}
if matchNum == n {
return matchIdx, nil
}
}
// We haven't found the nth match after scanning the string - Return an error
return nil, fmt.Errorf("invalid match index - too few matches found")
}
// FindAllMatches tries to find all matches of the regex represented by given start-state, with
// the given string
func FindAllMatches(regex Reg, str string) []Match {
idx := 0
str_runes := []rune(str)
var matchFound bool
var matchIdx Match
indices := make([]Match, 0)
for idx <= len(str_runes) {
matchFound, matchIdx, idx = findAllMatchesHelper(regex.start, str_runes, idx, regex.numGroups)
if matchFound {
indices = append(indices, matchIdx)
}
}
if len(indices) > 0 {
return pruneIndices(indices)
}
return indices
}
// Helper for FindAllMatches. Returns whether it found a match, the
// first Match it finds, and how far it got into the string ie. where
// the next search should start from.
//
// Might return duplicates or overlapping indices, so care must be taken to prune the resulting array.
func findAllMatchesHelper(start *State, str []rune, offset int, numGroups int) (bool, Match, int) {
// Base case - exit if offset exceeds string's length
if offset > len(str) {
// The second value here shouldn't be used, because we should exit when the third return value is greater than len(str)
return false, []Group{}, offset
}
// Hold a list of match indices for the current run. When we
// can no longer find a match, the match with the largest range is
// chosen as the match for the entire string.
// This allows us to pick the longest possible match (which is how greedy matching works).
// COMMENT ABOVE IS CURRENTLY NOT UP-TO-DATE
tempIndices := newMatch(numGroups + 1)
foundPath := false
startIdx := offset
endIdx := offset
currentStates := make([]*State, 0)
tempStates := make([]*State, 0) // Used to store states that should be used in next loop iteration
i := offset // Index in string
startingFrom := i // Store starting index
// If the first state is an assertion, make sure the assertion
// is true before we do _anything_ else.
if start.assert != NONE {
if start.checkAssertion(str, offset) == false {
i++
return false, []Group{}, i
}
}
// Increment until we hit a character matching the start state (assuming not 0-state)
if start.isEmpty == false {
for i < len(str) && !start.contentContains(str, i) {
i++
}
startIdx = i
startingFrom = i
i++ // Advance to next character (if we aren't at a 0-state, which doesn't match anything), so that we can check for transitions. If we advance at a 0-state, we will never get a chance to match the first character
}
start.threadGroups = newMatch(numGroups + 1)
// Check if the start state begins a group - if so, add the start index to our list
if start.groupBegin {
start.threadGroups[start.groupNum].StartIdx = i
// tempIndices[start.groupNum].startIdx = i
}
currentStates = append(currentStates, start)
// Main loop
for i < len(str) {
foundPath = false
zeroStates := make([]*State, 0)
// Keep taking zero-states, until there are no more left to take
// Objective: If any of our current states have transitions to 0-states, replace them with the 0-state. Do this until there are no more transitions to 0-states, or there are no more unique 0-states to take.
zeroStates, isZero := takeZeroState(currentStates, numGroups, i)
tempStates = append(tempStates, zeroStates...)
num_appended := 0
for isZero == true {
zeroStates, isZero = takeZeroState(tempStates, numGroups, i)
tempStates, num_appended = unique_append(tempStates, zeroStates...)
if num_appended == 0 { // Break if we haven't appended any more unique values
break
}
}
currentStates, _ = unique_append(currentStates, tempStates...)
tempStates = nil
// Take any transitions corresponding to current character
numStatesMatched := 0 // The number of states which had at least 1 match for this round
assertionFailed := false // Whether or not an assertion failed for this round
lastStateInList := false // Whether or not a last state was in our list of states
var lastStatePtr *State = nil // Pointer to the last-state, if it was found
lastLookaroundInList := false // Whether or not a last state (that is a lookaround) was in our list of states
for _, state := range currentStates {
matches, numMatches := state.matchesFor(str, i)
if numMatches > 0 {
numStatesMatched++
tempStates = append(tempStates, matches...)
foundPath = true
for _, m := range matches {
if m.threadGroups == nil {
m.threadGroups = newMatch(numGroups + 1)
}
copy(m.threadGroups, state.threadGroups)
}
}
if numMatches < 0 {
assertionFailed = true
}
if state.isLast {
if state.isLookaround() {
lastLookaroundInList = true
}
lastStateInList = true
lastStatePtr = state
}
}
if assertionFailed && numStatesMatched == 0 { // Nothing has matched and an assertion has failed
// If I'm being completely honest, I'm not sure why I have to check specifically for a _lookaround_
// state. The explanation below is my attempt to explain this behavior.
// If you replace 'lastLookaroundInList' with 'lastStateInList', one of the test cases fails.
//
// One of the states in our list was a last state and a lookaround. In this case, we
// don't abort upon failure of the assertion, because we have found
// another path to a final state.
// Even if the last state _was_ an assertion, we can use the previously
// saved indices to find a match.
if lastLookaroundInList {
break
} else {
if i == startingFrom {
i++
}
return false, []Group{}, i
}
}
// Check if we can find a state in our list that is:
// a. A last-state
// b. Empty
// c. Doesn't assert anything
for _, s := range currentStates {
if s.isLast && s.isEmpty && s.assert == NONE {
lastStatePtr = s
lastStateInList = true
}
}
if lastStateInList { // A last-state was in the list of states. add the matchIndex to our MatchIndex list
for j := 1; j < numGroups+1; j++ {
tempIndices[j] = lastStatePtr.threadGroups[j]
}
endIdx = i
tempIndices[0] = Group{startIdx, endIdx}
}
// Check if we can find a zero-length match
if foundPath == false {
if ok := zeroMatchPossible(str, i, numGroups, currentStates...); ok {
if tempIndices[0].isValid() == false {
tempIndices[0] = Group{startIdx, startIdx}
}
}
// If we haven't moved in the string, increment the counter by 1
// to ensure we don't keep trying the same string over and over.
// if i == startingFrom {
startIdx++
// i++
// }
if tempIndices.numValidGroups() > 0 && tempIndices[0].isValid() {
if tempIndices[0].StartIdx == tempIndices[0].EndIdx { // If we have a zero-length match, we have to shift the index at which we start. Otherwise we keep looking at the same part of the string over and over.
return true, tempIndices, tempIndices[0].EndIdx + 1
} else {
return true, tempIndices, tempIndices[0].EndIdx
}
}
return false, []Group{}, startIdx
}
currentStates = make([]*State, len(tempStates))
copy(currentStates, tempStates)
tempStates = nil
i++
}
// End-of-string reached. Go to any 0-states, until there are no more 0-states to go to. Then check if any of our states are in the end position.
// This is the exact same algorithm used inside the loop, so I should probably put it in a function.
zeroStates, isZero := takeZeroState(currentStates, numGroups, i)
tempStates = append(tempStates, zeroStates...)
num_appended := 0 // Number of unique states added to tempStates
for isZero == true {
zeroStates, isZero = takeZeroState(tempStates, numGroups, i)
tempStates, num_appended = unique_append(tempStates, zeroStates...)
if num_appended == 0 { // Break if we haven't appended any more unique values
break
}
}
currentStates = append(currentStates, tempStates...)
tempStates = nil
for _, state := range currentStates {
// Only add the match if the start index is in bounds. If the state has an assertion,
// make sure the assertion checks out.
if state.isLast && i <= len(str) {
if state.assert == NONE || state.checkAssertion(str, i) {
for j := 1; j < numGroups+1; j++ {
tempIndices[j] = state.threadGroups[j]
}
endIdx = i
tempIndices[0] = Group{startIdx, endIdx}
}
}
}
if tempIndices.numValidGroups() > 0 {
if tempIndices[0].StartIdx == tempIndices[0].EndIdx { // If we have a zero-length match, we have to shift the index at which we start. Otherwise we keep looking at the same part of the string over and over.
return true, tempIndices, tempIndices[0].EndIdx + 1
} else {
return true, tempIndices, tempIndices[0].EndIdx
}
}
if startIdx == startingFrom { // Increment starting index if we haven't moved in the string. Prevents us from matching the same part of the string over and over.
startIdx++
}
return false, []Group{}, startIdx
}
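
Below is a minimal usage sketch of the matching API in this file (not part of the commit). It assumes a Compile function in regex/compile.go - whose diff is suppressed above - that builds a Reg from a pattern string; that name, its signature, and the import path are assumptions.

package main

import (
    "fmt"

    "example.local/regex" // hypothetical import path
)

func main() {
    // Compile is assumed to live in the suppressed regex/compile.go; its exact signature is a guess.
    re, err := regex.Compile(`\b\w+\b`)
    if err != nil {
        panic(err)
    }

    // Leftmost match only - returns "" if there is no match, or if the match is empty.
    fmt.Println(regex.FindString(re, "hello world")) // expected: "hello"

    // All pruned, non-overlapping matches; index 0 of each Match is the whole-match Group.
    for _, m := range regex.FindAllMatches(re, "hello world") {
        fmt.Printf("%d-%d\n", m[0].StartIdx, m[0].EndIdx) // expected: 0-5, then 6-11
    }
}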

142
regex/misc.go Normal file

@@ -0,0 +1,142 @@
package regex
import (
"slices"
"unicode"
)
var whitespaceChars = []rune{' ', '\t', '\n'}
var digitChars = []rune{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
var wordChars = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
var LBRACKET rune = 0xF0001
var RBRACKET rune = 0xF0002
var ANY_CHAR rune = 0xF0003 // Represents any character - used for states where the allChars flag is on.
var LPAREN_CHAR rune = 0xF0004 // Parentheses in regex are concatenated with this - it acts as a pseudo-parenthesis
var RPAREN_CHAR rune = 0xF0005
var NONCAPLPAREN_CHAR rune = 0xF0006 // Represents a non-capturing group's LPAREN
var ESC_BACKSLASH rune = 0xF0007 // Represents an escaped backslash
var CHAR_RANGE rune = 0xF0008 // Represents a character range
var specialChars = []rune{'?', '*', '\\', '^', '$', '{', '}', '(', ')', '[', ']', '+', '|', '.', CONCAT, '<', '>', LBRACKET, RBRACKET, NONCAPLPAREN_CHAR}
// An interface for int and rune, which are identical
type character interface {
int | rune
}
// Returns true if str[idx] and str[idx-1] are separated by a word boundary.
func isWordBoundary(str []rune, idx int) bool {
str_runes := []rune(str)
wbounded := idx == 0 ||
idx >= len(str) ||
(!slices.Contains(wordChars, str_runes[idx-1]) && slices.Contains(wordChars, str_runes[idx])) ||
(slices.Contains(wordChars, str_runes[idx-1]) && !slices.Contains(wordChars, str_runes[idx]))
return wbounded
}
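// For example (an illustration, not part of the original file):
// isWordBoundary([]rune("a-b"), 1) is true, since 'a' is a word character and '-' is not,
// while isWordBoundary([]rune("ab"), 1) is false, since both neighbours are word characters.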
func isSpecialChar(c rune) bool {
return slices.Contains(specialChars, c)
}
// Some special characters have metacharacter replacements. These characters, when encountered in their literal form, can be treated as regular characters.
func isSpecialCharWithMetacharReplacement(c rune) bool {
return slices.Contains([]rune{'[', ']'}, c)
}
func isNormalChar(c rune) bool {
return !slices.Contains(specialChars, c)
}
// Ensure that the given elements are only appended to the given slice if they
// don't already exist. Returns the new slice, and the number of unique items appended.
func unique_append[T comparable](slc []T, items ...T) ([]T, int) {
num_appended := 0
for _, item := range items {
if !slices.Contains(slc, item) {
slc = append(slc, item)
num_appended++
}
}
return slc, num_appended
}
// Returns true only if all the given elements are equal
func allEqual[T comparable](items ...T) bool {
first := items[0]
for _, item := range items {
if item != first {
return false
}
}
return true
}
// Map function - convert a slice of T to a slice of V, based on a function
// that maps a T to a V
func Map[T, V any](slc []T, fn func(T) V) []V {
toReturn := make([]V, len(slc))
for i, val := range slc {
toReturn[i] = fn(val)
}
return toReturn
}
// Reduce function - reduces a slice of type T into a single value of type T,
// based on the given function.
func Reduce[T any](slc []T, fn func(T, T) T) T {
if len(slc) == 0 {
panic("Reduce on empty slice.")
}
for len(slc) > 1 {
v1 := slc[0]
v2 := slc[1]
slc = slc[1:]
slc[0] = fn(v1, v2)
}
return slc[0]
}
// Generate numbers in a range - start to end (both inclusive)
func genRangeInclusive[T character](start, end T) []T {
toRet := make([]T, (end-start)+1)
for i := start; i <= end; i++ {
toRet[i-start] = i
}
return toRet
}
// Returns a rune-slice containing all possible cases of the given rune, given the
// 'caseInsensitive' boolean variable.
// If this variable is false, the rune is returned as-is, without modifications.
// If it is true, then we return all possible cases of the
// rune.
// At the moment, this includes:
// 1. Upper case
// 2. Lower case
// 3. Title case
func allCases(r rune, caseInsensitive bool) []rune {
if caseInsensitive {
return []rune{unicode.ToLower(r), unicode.ToUpper(r), unicode.ToTitle(r)}
} else {
return []rune{r}
}
}
func isHex(c rune) bool {
return slices.Contains([]rune("0123456789abcdefABCDEF"), c)
}
func isOctal(c rune) bool {
return slices.Contains([]rune("01234567"), c)
}
// Replace an element in a slice with another, given both values
func replaceByValue[T comparable](slc []T, toReplace T, replaceWith T) []T {
for i, val := range slc {
if val == toReplace {
slc[i] = replaceWith
}
}
return slc
}
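
A quick illustration of the helpers above, written as a hypothetical in-package test (the test itself is not part of the commit):

package regex

import "testing"

func TestMiscHelpers(t *testing.T) {
    // genRangeInclusive works for any 'character' type; here it yields the ten runes '0'..'9'.
    digits := genRangeInclusive('0', '9')
    if len(digits) != 10 {
        t.Errorf("expected 10 runes, got %d", len(digits))
    }

    // unique_append only appends values that aren't already present.
    slc, n := unique_append([]int{1, 2}, 2, 3, 3)
    if n != 1 || len(slc) != 3 {
        t.Errorf("unexpected result: %v (%d appended)", slc, n)
    }

    // Map then Reduce: double each element, then sum the results (2+4+6 = 12).
    doubled := Map([]int{1, 2, 3}, func(x int) int { return x * 2 })
    if sum := Reduce(doubled, func(a, b int) int { return a + b }); sum != 12 {
        t.Errorf("expected 12, got %d", sum)
    }
}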

348
regex/nfa.go Normal file

@@ -0,0 +1,348 @@
package regex
import (
"fmt"
"slices"
)
const EPSILON int = 0xF0000
type assertType int
const (
NONE assertType = iota
SOS
EOS
WBOUND
NONWBOUND
PLA // Positive lookahead
NLA // Negative lookahead
PLB // Positive lookbehind
NLB // Negative lookbehind
ALWAYS_TRUE // An assertion that is always true
)
type State struct {
content stateContents // Contents of current state
isEmpty bool // If it is empty - Union operator and Kleene star states will be empty
isLast bool // If it is the last state (accept state)
output []*State // The outputs of the current state ie. the 'outward arrows'. A union operator state will have more than one of these.
transitions map[int][]*State // Transitions to different states (maps a character (int representation) to a list of states. This is useful if one character can lead to multiple states, eg. ab|aa)
isKleene bool // Identifies whether current node is a 0-state representing Kleene star
assert assertType // Type of assertion of current node - NONE means that the node doesn't assert anything
allChars bool // Whether or not the state represents all characters (eg. a 'dot' metacharacter). A 'dot' node doesn't store any contents directly, as it would take up too much space
except []rune // Only valid if allChars is true - match all characters _except_ the ones in this block. Useful for inverting character classes.
lookaroundRegex string // Only for lookaround states - Contents of the regex that the lookaround state holds
lookaroundNFA *State // Holds the NFA of the lookaroundRegex - if it exists
lookaroundNumCaptureGroups int // Number of capturing groups in lookaround regex if current node is a lookaround
groupBegin bool // Whether or not the node starts a capturing group
groupEnd bool // Whether or not the node ends a capturing group
groupNum int // Which capturing group the node starts / ends
// The following properties depend on the current match - I should think about resetting them for every match.
zeroMatchFound bool // Whether or not the state has been used for a zero-length match - only relevant for zero states
threadGroups []Group // Assuming that a state is part of a 'thread' in the matching process, this array stores the indices of capturing groups in the current thread. As matches are found for this state, its groups will be copied over.
}
// Clones the NFA starting from the given state.
func cloneState(start *State) *State {
return cloneStateHelper(start, make(map[*State]*State))
}
// Helper function for clone. The map is used to keep track of which states have
// already been copied, and which ones haven't.
// This function was created using output from Llama3.1:405B.
func cloneStateHelper(state *State, cloneMap map[*State]*State) *State {
// Base case - if the clone exists in our map, return it.
if clone, exists := cloneMap[state]; exists {
return clone
}
if state == nil {
return nil
}
// Recursive case - if the clone doesn't exist, create it, add it to the map,
// and recursively call for each of the transition states.
clone := &State{
content: append([]int{}, state.content...),
isEmpty: state.isEmpty,
isLast: state.isLast,
output: make([]*State, len(state.output)),
transitions: make(map[int][]*State),
isKleene: state.isKleene,
assert: state.assert,
zeroMatchFound: state.zeroMatchFound,
allChars: state.allChars,
except: append([]rune{}, state.except...),
lookaroundRegex: state.lookaroundRegex,
groupEnd: state.groupEnd,
groupBegin: state.groupBegin,
groupNum: state.groupNum,
}
cloneMap[state] = clone
for i, s := range state.output {
if s == state {
clone.output[i] = clone
} else {
clone.output[i] = cloneStateHelper(s, cloneMap)
}
}
for k, v := range state.transitions {
clone.transitions[k] = make([]*State, len(v))
for i, s := range v {
if s == state {
clone.transitions[k][i] = clone
} else {
clone.transitions[k][i] = cloneStateHelper(s, cloneMap)
}
}
}
if state.lookaroundNFA == state {
clone.lookaroundNFA = clone
}
clone.lookaroundNFA = cloneStateHelper(state.lookaroundNFA, cloneMap)
return clone
}
// Checks if the given state's assertion is true. Returns true if the given
// state doesn't have an assertion.
func (s State) checkAssertion(str []rune, idx int) bool {
if s.assert == ALWAYS_TRUE {
return true
}
if s.assert == SOS {
// Single-line mode: Beginning of string
// Multi-line mode: Previous character was newline
return idx == 0 || (multilineMode && (idx > 0 && str[idx-1] == '\n'))
}
if s.assert == EOS {
// Single-line mode: End of string
// Multi-line mode: current character is newline
// Index is at the end of the string, or it points to the last character which is a newline
return idx == len(str) || (multilineMode && str[idx] == '\n')
}
if s.assert == WBOUND {
return isWordBoundary(str, idx)
}
if s.assert == NONWBOUND {
return !isWordBoundary(str, idx)
}
if s.isLookaround() {
// The process here is simple:
// 1. Compile the regex stored in the state's contents.
// 2. Run it on a subset of the test string: everything after the current index for a lookahead, everything before it for a lookbehind.
// 3. Based on the kind of lookaround (and the indices we get), determine what action to take.
startState := s.lookaroundNFA
var runesToMatch []rune
var strToMatch string
if s.assert == PLA || s.assert == NLA {
runesToMatch = str[idx:]
} else {
runesToMatch = str[:idx]
}
if len(runesToMatch) == 0 {
strToMatch = ""
} else {
strToMatch = string(runesToMatch)
}
matchIndices := FindAllMatches(Reg{startState, s.lookaroundNumCaptureGroups}, strToMatch)
numMatchesFound := 0
for _, matchIdx := range matchIndices {
if s.assert == PLA || s.assert == NLA { // Lookahead - return true (or false) if at least one match starts at 0. Zero is used because the test-string _starts_ from idx.
if matchIdx[0].StartIdx == 0 {
numMatchesFound++
}
}
if s.assert == PLB || s.assert == NLB { // Lookbehind - return true (or false) if at least one match _ends_ at the current index.
if matchIdx[0].EndIdx == idx {
numMatchesFound++
}
}
}
if s.assert == PLA || s.assert == PLB { // Positive assertions want at least one match
return numMatchesFound > 0
}
if s.assert == NLA || s.assert == NLB { // Negative assertions only want zero matches
return numMatchesFound == 0
}
}
return true
}
// Returns true if the contents of 's' contain the value at the given index of the given string
func (s State) contentContains(str []rune, idx int) bool {
if s.assert != NONE {
return s.checkAssertion(str, idx)
}
if s.allChars {
return !slices.Contains(slices.Concat(notDotChars, s.except), str[idx]) // Return true only if the index isn't a 'notDotChar', or isn't one of the exception characters for the current node.
}
// Default - s.assert must be NONE
return slices.Contains(s.content, int(str[idx]))
}
func (s State) isLookaround() bool {
return s.assert == PLA || s.assert == PLB || s.assert == NLA || s.assert == NLB
}
// Returns the matches for the character at the given index of the given string.
// Also returns the number of matches. Returns -1 if an assertion failed.
func (s State) matchesFor(str []rune, idx int) ([]*State, int) {
// Assertions can be viewed as 'checks'. If the check fails, we return
// an empty array and 0.
// If it passes, we treat it like any other state, and return all the transitions.
if s.assert != NONE {
if s.checkAssertion(str, idx) == false {
return make([]*State, 0), -1
}
}
listTransitions := s.transitions[int(str[idx])]
for _, dest := range s.transitions[int(ANY_CHAR)] {
if !slices.Contains(slices.Concat(notDotChars, dest.except), str[idx]) {
// Add an allChar state to the list of matches if:
// a. The current character isn't a 'notDotChars' character. In single line mode, this includes newline. In multiline mode, it doesn't.
// b. The current character isn't in the state's exception list.
listTransitions = append(listTransitions, dest)
}
}
numTransitions := len(listTransitions)
return listTransitions, numTransitions
}
// verifyLastStatesHelper performs the depth-first recursion needed for verifyLastStates
func verifyLastStatesHelper(state *State, visited map[*State]bool) {
if len(state.transitions) == 0 {
state.isLast = true
return
}
// if len(state.transitions) == 1 && len(state.transitions[state.content]) == 1 && state.transitions[state.content][0] == state { // Eg. a*
if len(state.transitions) == 1 { // Eg. a*
var moreThanOneTrans bool // Dummy variable - check if all the transitions for the current state's contents have a length of one
for _, c := range state.content {
if len(state.transitions[c]) != 1 || state.transitions[c][0] != state {
moreThanOneTrans = true
}
}
state.isLast = !moreThanOneTrans
}
if state.isKleene { // A State representing a Kleene Star has transitions going out, which loop back to it. If all those transitions point to the same (single) state, then it must be a last state
transitionDests := make([]*State, 0)
for _, v := range state.transitions {
transitionDests = append(transitionDests, v...)
}
if allEqual(transitionDests...) {
state.isLast = true
return
}
}
if visited[state] == true {
return
}
visited[state] = true
for _, states := range state.transitions {
for i := range states {
if states[i] != state {
verifyLastStatesHelper(states[i], visited)
}
}
}
}
// verifyLastStates enables the 'isLast' flag for the leaf nodes (last states)
func verifyLastStates(start []*State) {
verifyLastStatesHelper(start[0], make(map[*State]bool))
}
// Concatenates s1 and s2, returns the start of the concatenation.
func concatenate(s1 *State, s2 *State) *State {
if s1 == nil {
return s2
}
for i := range s1.output {
for _, c := range s2.content { // Create transitions, on every element of s2's content, from s1's output states to s2
s1.output[i].transitions[c], _ = unique_append(s1.output[i].transitions[c], s2)
}
}
s1.output = s2.output
return s1
}
func kleene(s1 State) (*State, error) {
if s1.isEmpty && s1.assert != NONE {
return nil, fmt.Errorf("previous token is not quantifiable")
}
toReturn := &State{}
toReturn.transitions = make(map[int][]*State)
toReturn.content = newContents(EPSILON)
toReturn.isEmpty = true
toReturn.isKleene = true
toReturn.output = append(toReturn.output, toReturn)
for i := range s1.output {
for _, c := range toReturn.content {
s1.output[i].transitions[c], _ = unique_append(s1.output[i].transitions[c], toReturn)
}
}
for _, c := range s1.content {
toReturn.transitions[c], _ = unique_append(toReturn.transitions[c], &s1)
}
return toReturn, nil
}
func alternate(s1 *State, s2 *State) *State {
toReturn := &State{}
toReturn.transitions = make(map[int][]*State)
toReturn.output = append(toReturn.output, s1.output...)
toReturn.output = append(toReturn.output, s2.output...)
// Unique append is used here (and elsewhere) to ensure that,
// for any given transition, a state can only be mentioned once.
// For example, given the transition 'a', the state 's1' can only be mentioned once.
// Without it, duplicates would lead to multiple instances of the same set of match indices, since both
// 's1' entries would be considered to match.
for _, c := range s1.content {
toReturn.transitions[c], _ = unique_append(toReturn.transitions[c], s1)
}
for _, c := range s2.content {
toReturn.transitions[c], _ = unique_append(toReturn.transitions[c], s2)
}
toReturn.content = newContents(EPSILON)
toReturn.isEmpty = true
return toReturn
}
func question(s1 *State) *State { // Use the fact that ab? == a(b|)
s2 := &State{}
s2.transitions = make(map[int][]*State)
s2.content = newContents(EPSILON)
s2.output = append(s2.output, s2)
s2.isEmpty = true
s3 := alternate(s1, s2)
return s3
}
// Creates and returns a new state with the 'default' values.
func newState() State {
ret := State{
output: make([]*State, 0),
transitions: make(map[int][]*State),
assert: NONE,
except: append([]rune{}, 0),
lookaroundRegex: "",
groupEnd: false,
groupBegin: false,
}
ret.output = append(ret.output, &ret)
return ret
}
// Creates and returns a state that _always_ has a zero-length match.
func zeroLengthMatchState() State {
start := newState()
start.content = newContents(EPSILON)
start.isEmpty = true
start.assert = ALWAYS_TRUE
return start
}
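
A rough sketch (not part of the commit) of how the constructors above compose into an NFA for `a(b|c)*`, roughly what the suppressed compiler in regex/compile.go presumably does; newContents is defined outside this file, so its use here is an assumption.

package regex

// buildExampleNFA is a hypothetical helper showing how the NFA constructors fit together.
func buildExampleNFA() *State {
    mkChar := func(r rune) *State {
        s := newState()
        s.content = newContents(int(r)) // newContents is assumed to wrap ints into stateContents
        return &s
    }

    a, b, c := mkChar('a'), mkChar('b'), mkChar('c')

    alt := alternate(b, c)    // b|c
    star, err := kleene(*alt) // (b|c)*
    if err != nil {
        panic(err)
    }

    start := concatenate(a, star)     // a(b|c)*
    verifyLastStates([]*State{start}) // flag the accept state(s)
    return start
}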


@@ -0,0 +1,7 @@
In PCRE, following a backreference by _any_ number seems to turn it into an octal value. Why is this?
Eg.
`(a)\1` <-- Backreference
`(a)\17` <-- Octal '17'
`(a)\19` <-- Octal 1, then literal 9
`(a)\1a` <-- Backreference, then 'a'

200
regex/postfixNode.go Normal file

@@ -0,0 +1,200 @@
package regex
import "fmt"
type NodeType int
// This is a slice containing all escapable characters that have special meaning.
// Eg. \b is word boundary, \w is word character etc.
var escapedChars []rune = []rune("wWdDbBnaftrvsS0")
// This is a list of the possible node types
const (
CHARACTER NodeType = iota
CHARCLASS
PIPE
CONCATENATE
KLEENE
QUESTION
PLUS
ASSERTION
LPAREN
RPAREN
)
// Helper constants for lookarounds
const POSITIVE = 1
const NEGATIVE = -1
const LOOKAHEAD = 1
const LOOKBEHIND = -1
var INFINITE_REPS int = -1 // Represents infinite reps eg. the end range in {5,}
// This represents a node in the postfix representation of the expression
type postfixNode struct {
nodetype NodeType
contents []rune // Contents of the node
startReps int // Minimum number of times the node should be repeated - used with numeric specifiers
endReps int // Maximum number of times the node should be repeated - used with numeric specifiers
allChars bool // Whether or not the current node represents all characters (eg. dot metacharacter)
except []postfixNode // For inverted character classes, we match every unicode character _except_ a few. In this case, allChars is true and the exceptions are placed here.
lookaroundSign int // ONLY USED WHEN nodetype == ASSERTION. Whether we have a positive or negative lookaround.
lookaroundDir int // Lookbehind or lookahead
nodeContents []postfixNode // ONLY USED WHEN nodetype == CHARCLASS. Holds all the nodes inside the given CHARCLASS node.
}
// Converts the given list of postfixNodes to one node of type CHARCLASS.
// Used to convert eg. 'a', 'b' and 'c' to '[abc]'.
// If the character class is negated, it returns a postfixNode of type CHARACTER.
// This node will behave like the dot metacharacter, but it has a longer list of runes that
// it will not match.
func newCharClassNode(nodes []postfixNode, negated bool) postfixNode {
rtv := postfixNode{}
rtv.nodetype = CHARCLASS
rtv.startReps = 1
rtv.endReps = 1
if negated {
rtv.nodetype = CHARACTER
rtv.contents = []rune{ANY_CHAR}
rtv.allChars = true
rtv.except = nodes
} else {
rtv.nodeContents = nodes
}
return rtv
}
// Creates a new escaped node - the given character is assumed to have been preceded by a backslash
func newEscapedNode(c rune, inCharClass bool) (postfixNode, error) {
toReturn := postfixNode{}
toReturn.startReps = 1
toReturn.endReps = 1
switch c {
case 's': // Whitespace
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, whitespaceChars...)
case 'S': // Non-whitespace
toReturn = newPostfixDotNode()
toReturn.except = append([]postfixNode{}, newPostfixNode(whitespaceChars...))
case 'd': // Digits
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, digitChars...)
case 'D': // Non-digits
toReturn = newPostfixDotNode()
toReturn.except = append([]postfixNode{}, newPostfixNode(digitChars...))
case 'w': // word character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, wordChars...)
case 'W': // Non-word character
toReturn = newPostfixDotNode()
toReturn.except = append([]postfixNode{}, newPostfixNode(wordChars...))
case 'b', 'B':
if c == 'b' && inCharClass {
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(8))
} else {
toReturn.nodetype = ASSERTION
toReturn.contents = append(toReturn.contents, c)
}
case 'n': // Newline character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, '\n')
case '0': // NULL character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(0))
case 'a': // Bell character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(7))
case 'f': // Form feed character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(12))
case 't': // Horizontal tab character
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(9))
case 'r': // Carriage return
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(13))
case 'v': // Vertical tab
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, rune(11))
case '-': // Literal hyphen - only in character class
if inCharClass {
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, '-')
} else {
return postfixNode{}, fmt.Errorf("invalid escape character")
}
default: // None of the above - append it as a regular character
if isNormalChar(c) { // Normal characters cannot be escaped
return postfixNode{}, fmt.Errorf("invalid escape character")
}
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, c)
}
return toReturn, nil
}
// Creates and returns a postfixNode based on the given contents
func newPostfixNode(contents ...rune) postfixNode {
if len(contents) < 1 {
panic("Empty node.")
}
to_return := postfixNode{}
to_return.startReps = 1
to_return.endReps = 1
if len(contents) > 1 { // If the node has more than one element, it must be a character class - the type must be CHARACTER
to_return.nodetype = CHARACTER
to_return.contents = contents
} else { // Node has one element, could be anything
switch contents[0] {
case '+':
to_return.nodetype = PLUS
case '?':
to_return.nodetype = QUESTION
case '*':
to_return.nodetype = KLEENE
case '|':
to_return.nodetype = PIPE
case CONCAT:
to_return.nodetype = CONCATENATE
case '^', '$':
to_return.nodetype = ASSERTION
case '(':
to_return.nodetype = LPAREN
case ')':
to_return.nodetype = RPAREN
default:
to_return.nodetype = CHARACTER
}
to_return.contents = append(to_return.contents, contents...)
// Special cases for LPAREN and RPAREN - they have special characters defined for them
if to_return.nodetype == LPAREN {
to_return.contents = []rune{LPAREN_CHAR}
}
if to_return.nodetype == RPAREN {
to_return.contents = []rune{RPAREN_CHAR}
}
}
return to_return
}
// Creates and returns a postfixNode representing the 'dot' metacharacter.
func newPostfixDotNode() postfixNode {
toReturn := postfixNode{}
toReturn.startReps = 1
toReturn.endReps = 1
toReturn.nodetype = CHARACTER
toReturn.allChars = true
toReturn.contents = []rune{ANY_CHAR}
return toReturn
}
// Creates a character node, regardless of the contents
func newPostfixCharNode(contents ...rune) postfixNode {
toReturn := postfixNode{}
toReturn.startReps = 1
toReturn.endReps = 1
toReturn.nodetype = CHARACTER
toReturn.contents = append(toReturn.contents, contents...)
return toReturn
}
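
A short, hypothetical sketch (not part of the commit) showing the constructors above for a few tokens - `\d`, `+` and `[ab]`; no claim is made about the exact order in which the suppressed compiler arranges them.

package regex

// examplePostfixNodes is a hypothetical illustration of the node constructors.
func examplePostfixNodes() []postfixNode {
    digit, err := newEscapedNode('d', false) // `\d` -> a CHARACTER node holding digitChars
    if err != nil {
        panic(err)
    }
    plus := newPostfixNode('+') // nodetype == PLUS

    // `[ab]` -> a CHARCLASS node wrapping two single-character nodes
    class := newCharClassNode([]postfixNode{
        newPostfixCharNode('a'),
        newPostfixCharNode('b'),
    }, false)

    return []postfixNode{digit, plus, class}
}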

126
regex/range2regex.go Normal file

@@ -0,0 +1,126 @@
package regex
import (
"fmt"
"math"
"strconv"
)
type numRange struct {
start int
end int
}
// Returns the exponent of the closest power of 10 smaller
// than the given value.
func floorPower10(val int) int {
return int(math.Floor(math.Log10(float64(val))))
}
// Returns the smallest multiple of 10^exp that is greater than or equal to val
func roundUpToNearest10Multiple(val int, exp int) int {
bench := int(math.Round(math.Pow10(exp)))
if val != 0 && val%bench == 0 {
return val
} else {
return (bench - val%bench) + val
}
}
func roundDownToNearest10Multiple(val int, exp int) int {
bench := int(math.Round(math.Pow10(exp)))
return val - val%bench
}
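// For example (an illustration, not part of the original file):
// roundUpToNearest10Multiple(15, 1) == 20, roundUpToNearest10Multiple(20, 2) == 100,
// and roundDownToNearest10Multiple(320, 2) == 300.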
// Converts the given integer into an int-slice, where each element
// represents a digit of the number.
func intToSlc(val int) []int {
valStr := strconv.Itoa(val)
valSlc := []rune(valStr)
toRet := make([]int, len(valStr))
for i, r := range valSlc {
toRet[i] = int(r - 48)
}
return toRet
}
func range2regex(start int, end int) (string, error) {
rangeStart := start
rangeEnd := end
if rangeStart > rangeEnd {
return "", fmt.Errorf("numeric range start greater than range end")
}
ranges := make([]numRange, 0)
// If both numbers are in the same power of 10 (eg. 15000 and 17000),
// the maximum power of 10 that we will go to is determined by the largest
// power of 10 at which both numbers differ. Given 15000 and 17000, we will
// go up to 10^3, because that is the largest power at which they differ.
startRangeSlc := intToSlc(rangeStart)
endRangeSlc := intToSlc(rangeEnd)
maxPower10 := 0
if len(startRangeSlc) != len(endRangeSlc) { // Different number of digits, so we will go up to the maximum (which must be rangeEnd)
maxPower10 = floorPower10(rangeEnd) // Maximum power of 10 that we will reach
} else {
maxPower10 = 0
for i := range startRangeSlc {
if startRangeSlc[i] != endRangeSlc[i] {
maxPower10 = len(startRangeSlc) - i - 1
break
}
}
}
tmp := rangeStart
exp := 1 // The exponent of 10 that we are finding the range to
// Increasing up to highest power
for exp <= maxPower10 {
tmpRangeEnd := roundUpToNearest10Multiple(tmp, exp)
if tmp != tmpRangeEnd {
ranges = append(ranges, numRange{tmp, tmpRangeEnd - 1})
}
tmp = tmpRangeEnd
exp++
}
exp--
// Decreasing down to lowest power
for exp >= 1 {
tmpRangeEnd := roundDownToNearest10Multiple(rangeEnd, exp)
if tmp != tmpRangeEnd {
ranges = append(ranges, numRange{tmp, tmpRangeEnd - 1})
}
tmp = tmpRangeEnd
exp--
}
// Last range - tmp to rangeEnd
ranges = append(ranges, numRange{tmp, rangeEnd})
regex := string(NONCAPLPAREN_CHAR)
// Generate the regex
for i, rg := range ranges {
if i > 0 {
regex += "|"
}
regex += string(NONCAPLPAREN_CHAR)
startSlc := intToSlc(rg.start)
endSlc := intToSlc(rg.end)
if len(startSlc) != len(endSlc) {
return "", fmt.Errorf("Error parsing numeric range")
}
for i := range startSlc {
if startSlc[i] == endSlc[i] {
regex += string(rune(startSlc[i] + 48)) // '0' is ascii value 48, 1 is 49 etc. To convert the digit to its character form, we can just add 48.
} else {
regex += fmt.Sprintf("%c%c-%c%c", LBRACKET, rune(startSlc[i]+48), rune(endSlc[i]+48), RBRACKET)
}
}
regex += ")"
}
regex += ")"
return regex, nil
}
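
A hypothetical sketch (not part of the commit) of what range2regex computes. The returned string uses the internal NONCAPLPAREN_CHAR, LBRACKET and RBRACKET runes rather than literal '(', '[' and ']', so it is meant to be fed back into the compiler rather than printed directly.

package regex

// exampleRange2Regex traces the decomposition performed by range2regex.
func exampleRange2Regex() {
    re, err := range2regex(15, 320)
    if err != nil {
        panic(err)
    }
    // 15-320 is split into the sub-ranges 15-19, 20-99, 100-299, 300-319 and 320-320,
    // which correspond to the alternation 1[5-9]|[2-9][0-9]|[1-2][0-9][0-9]|3[0-1][0-9]|320.
    _ = re
}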

741
regex/re_test.go Normal file

@@ -0,0 +1,741 @@
package regex
import (
"fmt"
"slices"
"testing"
)
var reTests = []struct {
re string
flags []ReFlag
str string
result []Group // Stores all zero-groups in the match
}{
{"a", nil, "abc", []Group{{0, 1}}},
{"a", nil, "bca", []Group{{2, 3}}},
{"l", nil, "ggllgg", []Group{{2, 3}, {3, 4}}},
{"(b|c)", nil, "abdceb", []Group{{1, 2}, {3, 4}, {5, 6}}},
{"a+", nil, "brerereraaaaabbbbb", []Group{{8, 13}}},
{"ab+", nil, "qweqweqweaqweqweabbbbbr", []Group{{16, 22}}},
{"(b|c|A)", nil, "ooaoobocA", []Group{{5, 6}, {7, 8}, {8, 9}}},
{"ab*", nil, "a", []Group{{0, 1}}},
{"ab*", nil, "abb", []Group{{0, 3}}},
{"a*b", nil, "aaab", []Group{{0, 4}}},
{"a*b", nil, "qwqw", []Group{}},
{"(abc)*", nil, "abcabcabc", []Group{{0, 9}, {9, 9}}},
{"((abc)|(def))*", nil, "abcdef", []Group{{0, 6}, {6, 6}}},
{"(abc)*|(def)*", nil, "abcdef", []Group{{0, 3}, {3, 6}, {6, 6}}},
{"b*a*a", nil, "bba", []Group{{0, 3}}},
{"(ab)+", nil, "abcabddd", []Group{{0, 2}, {3, 5}}},
{"a(b(c|d)*)*", nil, "abccbd", []Group{{0, 6}}},
{"a(b|c)*d+", nil, "abccdd", []Group{{0, 6}}},
{"a*", nil, "", []Group{{0, 0}}},
{"a|b", nil, "c", []Group{}},
{"(a|b)*c", nil, "aabbc", []Group{{0, 5}}},
{"a(b|b)", nil, "ab", []Group{{0, 2}}},
{"a*", nil, "aaaaaaaa", []Group{{0, 8}, {8, 8}}},
{"ab?", nil, "ab", []Group{{0, 2}}},
{"a?b", nil, "ab", []Group{{0, 2}}},
{"a?", nil, "", []Group{{0, 0}}},
{"a?b?c", nil, "a", []Group{}},
{"a?b?c?", nil, "ab", []Group{{0, 2}, {2, 2}}},
{"a?b?c?", nil, "ac", []Group{{0, 2}, {2, 2}}},
{"a?b?c", nil, "abc", []Group{{0, 3}}},
{"a?b?c", nil, "acb", []Group{{0, 2}}},
{"[abc]", nil, "defadefbdefce", []Group{{3, 4}, {7, 8}, {11, 12}}},
{"[ab]c", nil, "ab", []Group{}},
{"g[ab]c", nil, "gac", []Group{{0, 3}}},
{"g[ab]c", nil, "gbc", []Group{{0, 3}}},
{"g[ab]c", nil, "gc", []Group{}},
{"g[ab]c", nil, "gfc", []Group{}},
{"[ab]*", nil, "aabbbabaababab", []Group{{0, 14}, {14, 14}}},
{"[ab]+", nil, "aabbbablaababab", []Group{{0, 7}, {8, 15}}},
{"[Ff]r[Uu]it", nil, "fruit", []Group{{0, 5}}},
{"[Ff]r[Uu]it", nil, "FrUit", []Group{{0, 5}}},
{"[Ff]r[Uu|]it", nil, "Fr|it", []Group{{0, 5}}},
{"[Ff]r([Uu]|[pP])it", nil, "Frpit", []Group{{0, 5}}},
{"[Ff]r[Uu]|[pP]it", nil, "Frpit", []Group{{2, 5}}},
{"[a-zA-Z]+", nil, "Hello, how is it going?", []Group{{0, 5}, {7, 10}, {11, 13}, {14, 16}, {17, 22}}},
{".+", nil, "Hello, how is it going?", []Group{{0, 23}}},
{"a.", nil, "a ", []Group{{0, 2}}},
{"a.b", nil, "a/b", []Group{{0, 3}}},
{".", nil, "a ", []Group{{0, 1}, {1, 2}}},
{"a.", nil, "a ", []Group{{0, 2}}},
{".+b", nil, "abc", []Group{{0, 2}}},
{`\d`, nil, "1a0a3s'''34343s", []Group{{0, 1}, {2, 3}, {4, 5}, {9, 10}, {10, 11}, {11, 12}, {12, 13}, {13, 14}}},
{`\\`, nil, `a\b\c\qwe\`, []Group{{1, 2}, {3, 4}, {5, 6}, {9, 10}}},
{`\W`, nil, `"Hello", he said. How are you doing?`, []Group{{0, 1}, {6, 7}, {7, 8}, {8, 9}, {11, 12}, {16, 17}, {17, 18}, {21, 22}, {25, 26}, {29, 30}, {35, 36}}},
{`\w`, nil, ";';';';';'qwe12", []Group{{10, 11}, {11, 12}, {12, 13}, {13, 14}, {14, 15}}},
{`\s`, nil, "a b c d", []Group{{1, 2}, {3, 4}, {5, 6}, {6, 7}}},
{`\<`, nil, "<HTML><body>", []Group{{0, 1}, {6, 7}}},
{`\(.+\)`, nil, "Not (paranthesized), (so) is (this) not", []Group{{4, 35}}},
{"[^abc]+", nil, "qarbtopsaplpclkpasdmb prejip0r,p", []Group{{0, 1}, {2, 3}, {4, 8}, {9, 12}, {13, 16}, {17, 20}, {21, 32}}},
{"[^a]+", nil, "qqqaq", []Group{{0, 3}, {4, 5}}},
{"[^0-9]+", nil, "a1b2c3dd", []Group{{0, 1}, {2, 3}, {4, 5}, {6, 8}}},
{"[^abc]+", nil, "ababababbababaccacacacaca", []Group{}},
{`\[`, nil, "a[b[c[]]]", []Group{{1, 2}, {3, 4}, {5, 6}}},
{`\([^)]+\)`, nil, "Not (paranthesized), (so) is (this) not", []Group{{4, 19}, {21, 25}, {29, 35}}},
{"^ab", nil, "ab bab", []Group{{0, 2}}},
{"^aaaa^", nil, "aaaaaaaa", []Group{}},
{"^([bB][Gg])", nil, "bG", []Group{{0, 2}}},
{"b$", nil, "ba", []Group{}},
{"(boy|girl)$", nil, "girlf", []Group{}},
{`\bint\b`, nil, "print int integer", []Group{{6, 9}}},
{`int\b`, nil, "ints", []Group{}},
{`int(\b|a)`, nil, "inta", []Group{{0, 4}}},
{`\b\d+\b`, nil, "511 a3 43", []Group{{0, 3}, {7, 9}}},
{`\Bint\B`, nil, "prints int integer print", []Group{{2, 5}}},
{`^`, nil, "5^3^2", []Group{{0, 0}}},
{`\^`, nil, "5^3^2", []Group{{1, 2}, {3, 4}}},
{`pool$`, nil, "pool carpool", []Group{{8, 12}}},
{`^int$`, nil, "print int integer", []Group{}},
{`^int$`, nil, "int", []Group{{0, 3}}},
{`b*`, nil, "aaaaaaaaaaqweqwe", []Group{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}, {8, 8}, {9, 9}, {10, 10}, {11, 11}, {12, 12}, {13, 13}, {14, 14}, {15, 15}, {16, 16}}},
{"a{4}", nil, "aabaaa", []Group{}},
{"ab{5}", nil, "abbbbbab", []Group{{0, 6}}},
{"(a|b){3,4}", nil, "aba", []Group{{0, 3}}},
{"(a|b){3,4}", nil, "ababaa", []Group{{0, 4}}},
{"(bc){5,}", nil, "bcbcbcbcbcbcbcbc", []Group{{0, 16}}},
{`\d{3,4}`, nil, "1209", []Group{{0, 4}}},
{`\d{3,4}`, nil, "109", []Group{{0, 3}}},
{`\d{3,4}`, nil, "5", []Group{}},
{`\d{3,4}`, nil, "123135", []Group{{0, 4}}},
{`\d{3,4}`, nil, "89a-0", []Group{}},
{`\d{3,4}`, nil, "ababab555", []Group{{6, 9}}},
{`\bpaint\b`, nil, "paints", []Group{}},
{`\b\w{5}\b`, nil, "paint", []Group{{0, 5}}},
{`[^\w]`, nil, "abcdef1230[]qq';;'", []Group{{10, 11}, {11, 12}, {14, 15}, {15, 16}, {16, 17}, {17, 18}}},
{`[^\W]`, nil, "abcdef1230[]qq';;'", []Group{{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9, 10}, {12, 13}, {13, 14}}},
{`[\[\]]`, nil, "a[b[l]]", []Group{{1, 2}, {3, 4}, {5, 6}, {6, 7}}},
// Unicode tests
{`.+`, nil, "úïäö´«åæïëòöê»éãçâï«úïòíñ", []Group{{0, 25}}},
{`a.b`, nil, "a²b", []Group{{0, 3}}},
{`[^a]+`, nil, "úïäö´«åæïëòöê»éãçâï«úïòíñ", []Group{{0, 25}}},
// Fun experiment - AI-generated tests
{"(abc|def|ghi)", nil, "abcdefg", []Group{{0, 3}, {3, 6}}},
{"a(b|c)d", nil, "abcd", []Group{}},
{"a(b|c)*d", nil, "abcbcd", []Group{{0, 6}}},
{"a(b|c)+d", nil, "abcbcd", []Group{{0, 6}}},
{"a(b|c)?d", nil, "abd", []Group{{0, 3}}},
{".+", nil, "hello world", []Group{{0, 11}}},
{"a.b", nil, "aXb", []Group{{0, 3}}},
{"a.*b", nil, "aXb", []Group{{0, 3}}},
{"a.{2,3}b", nil, "aXXb", []Group{{0, 4}}},
{"a.{2,}b", nil, "aXXXb", []Group{{0, 5}}},
{"a.{0,3}b", nil, "ab", []Group{{0, 2}}},
{"[abc]+", nil, "abcabc", []Group{{0, 6}}},
{"[a-zA-Z]+", nil, "HelloWorld", []Group{{0, 10}}},
{"[^abc]+", nil, "defghi", []Group{{0, 6}}},
{"^hello", nil, "hello world", []Group{{0, 5}}},
{"world$", nil, "hello world", []Group{{6, 11}}},
{`\bhello\b`, nil, "hello world", []Group{{0, 5}}},
{`\Bhello\B`, nil, "hello world", []Group{}},
{"(hello|world)", nil, "hello world", []Group{{0, 5}, {6, 11}}},
{"(hello|world)+", nil, "hello world", []Group{{0, 5}, {6, 11}}},
{"(hello|world)*", nil, "hello world", []Group{{0, 5}, {5, 5}, {6, 11}, {11, 11}}},
{"(hello|world)?", nil, "hello world", []Group{{0, 5}, {5, 5}, {6, 11}, {11, 11}}},
{"ú.+ï", nil, "úïäö´«åæïëòöê»éãçâï«úïòíñ", []Group{{0, 22}}},
{"(?=hello)", nil, "hello world", []Group{{0, 0}}},
{"(?!hello)", nil, "hello world", []Group{{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}, {8, 8}, {9, 9}, {10, 10}, {11, 11}}},
{"(?<=hello)", nil, "hello world", []Group{{5, 5}}},
{"(?<!hello)", nil, "hello world", []Group{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {6, 6}, {7, 7}, {8, 8}, {9, 9}, {10, 10}, {11, 11}}},
{"^((3[7-9])|([4-9][0-9])|([1-9][0-9][0-9])|(1000))$", nil, "40", []Group{{0, 2}}},
{"^((3[7-9])|([4-9][0-9])|([1-9][0-9][0-9])|(1000))$", nil, "040", []Group{}},
{"^((3[7-9])|([4-9][0-9])|([1-9][0-9][0-9])|(1000))$", nil, "400", []Group{{0, 3}}},
{"^((3[7-9])|([4-9][0-9])|([1-9][0-9][0-9])|(1000))$", nil, "4000", []Group{}},
{"a{1,3}", nil, "aaaaa", []Group{{0, 3}, {3, 5}}},
{`\\[ab\\]`, nil, "a", []Group{}},
{`\\[ab\\]`, nil, `\a`, []Group{{0, 2}}},
// Lookaround tests
{"(?<=bo)y", nil, "boy", []Group{{2, 3}}},
{"bo(?=y)", nil, "boy", []Group{{0, 2}}},
{"(?<=f)f+(?=f)", nil, "fffff", []Group{{1, 4}}},
{"(?<=f)f+(?=f)", nil, "fffffa", []Group{{1, 4}}},
// Some POSIX charclass tests
{"[[:lower:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{0, 26}}},
{"[[:upper:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{26, 52}}},
{"[[:alpha:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{0, 52}}},
{"[[:digit:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{52, 62}}},
{"[[:alnum:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{0, 62}}},
{"[[:punct:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{62, 70}}},
{"[[:ascii:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{0, 70}}},
{"[[:graph:]]+", nil, "abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPRQSTUVWXYZ0123456789!@#$%^&*", []Group{{0, 70}}},
// Test cases from Python's RE test suite
{`[\1]`, nil, "\x01", []Group{{0, 1}}},
{`\0`, nil, "\x00", []Group{{0, 1}}},
{`[\0a]`, nil, "\x00", []Group{{0, 1}}},
{`[\0a]`, nil, "\x00", []Group{{0, 1}}},
{`[a\0]`, nil, "\x00", []Group{{0, 1}}},
{`[^a\0]`, nil, "\x00", []Group{}},
{`\a[\b]\f\n\r\t\v`, nil, "\a\b\f\n\r\t\v", []Group{{0, 7}}},
{`[\a][\b][\f][\n][\r][\t][\v]`, nil, "\a\b\f\n\r\t\v", []Group{{0, 7}}},
{`\u`, nil, "", nil},
{`\xff`, nil, "ÿ", []Group{{0, 1}}},
{`\x00ffffffffffffff`, nil, "\xff", []Group{}},
{`\x00f`, nil, "\x0f", []Group{}},
{`\x00fe`, nil, "\xfe", []Group{}},
{`^\w+=(\\[\000-\277]|[^\n\\])*`, nil, "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", []Group{{0, 32}}},
{`a.b`, nil, `acb`, []Group{{0, 3}}},
{`a.b`, nil, "a\nb", []Group{}},
{`a.*b`, nil, "acc\nccb", []Group{}},
{`a.{4,5}b`, nil, "acc\nccb", []Group{}},
{`a.b`, nil, "a\rb", []Group{{0, 3}}},
{`a.b`, []ReFlag{RE_SINGLE_LINE}, "a\nb", []Group{{0, 3}}},
{`a.*b`, []ReFlag{RE_SINGLE_LINE}, "acc\nccb", []Group{{0, 7}}},
{`a.{4,5}b`, []ReFlag{RE_SINGLE_LINE}, "acc\nccb", []Group{{0, 7}}},
{`)`, nil, ``, nil},
{`^$`, nil, ``, []Group{{0, 0}}},
{`abc`, nil, `abc`, []Group{{0, 3}}},
{`abc`, nil, `xbc`, []Group{}},
{`abc`, nil, `axc`, []Group{}},
{`abc`, nil, `abx`, []Group{}},
{`abc`, nil, `xabcy`, []Group{{1, 4}}},
{`abc`, nil, `ababc`, []Group{{2, 5}}},
{`ab*c`, nil, `abc`, []Group{{0, 3}}},
{`ab*bc`, nil, `abc`, []Group{{0, 3}}},
{`ab*bc`, nil, `abbc`, []Group{{0, 4}}},
{`ab*bc`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab{0,}c`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab+bc`, nil, `abbc`, []Group{{0, 4}}},
{`ab+bc`, nil, `abc`, []Group{}},
{`ab+bc`, nil, `abq`, []Group{}},
{`ab{1,}bc`, nil, `abq`, []Group{}},
{`ab+bc`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab{1,}bc`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab{1,3}bc`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab{3,4}bc`, nil, `abbbbc`, []Group{{0, 6}}},
{`ab{4,5}bc`, nil, `abbbbc`, []Group{}},
{`ab?bc`, nil, `abbc`, []Group{{0, 4}}},
{`ab?bc`, nil, `abc`, []Group{{0, 3}}},
{`ab{0,1}bc`, nil, `abc`, []Group{{0, 3}}},
{`ab?bc`, nil, `abbbbc`, []Group{}},
{`ab?c`, nil, `abc`, []Group{{0, 3}}},
{`^abc$`, nil, `abc`, []Group{{0, 3}}},
{`^abc$`, nil, `abcc`, []Group{}},
{`^abc`, nil, `abcc`, []Group{{0, 3}}},
{`^abc$`, nil, `aabc`, []Group{}},
{`abc$`, nil, `aabc`, []Group{{1, 4}}},
{`^`, nil, `abc`, []Group{{0, 0}}},
{`$`, nil, `abc`, []Group{{3, 3}}},
{`a.c`, nil, `abc`, []Group{{0, 3}}},
{`a.c`, nil, `axc`, []Group{{0, 3}}},
{`a.*c`, nil, `axyzc`, []Group{{0, 5}}},
{`a.*c`, nil, `axyzd`, []Group{}},
{`a[bc]d`, nil, `abc`, []Group{}},
{`a[bc]d`, nil, `abd`, []Group{{0, 3}}},
{`a[b-d]e`, nil, `abd`, []Group{}},
{`a[b-d]e`, nil, `ace`, []Group{{0, 3}}},
{`a[b-d]`, nil, `aac`, []Group{{1, 3}}},
{`a[-b]`, nil, `a-`, []Group{{0, 2}}}, // If a character class has a hyphen without a start or end character, it is treated as a literal hyphen
{`a[\-b]`, nil, `a-`, []Group{{0, 2}}},
{`a[b-]`, nil, `a-`, []Group{{0, 2}}}, // If a character class has a hyphen without a start or end character, it is treated as a literal hyphen
{`a[]b`, nil, `-`, nil},
{`a[`, nil, `-`, nil},
{`a\`, nil, `-`, nil},
{`abc)`, nil, `-`, nil},
{`(abc`, nil, `-`, nil},
{`a]`, nil, `a]`, []Group{{0, 2}}},
{`a[]]b`, nil, `a]b`, []Group{{0, 3}}},
{`a[\]]b`, nil, `a]b`, []Group{{0, 3}}},
{`a[^bc]d`, nil, `aed`, []Group{{0, 3}}},
{`a[^bc]d`, nil, `abd`, []Group{}},
{`a[^-b]c`, nil, `adc`, []Group{{0, 3}}},
{`a[^-b]c`, nil, `a-c`, []Group{}},
{`a[^]b]c`, nil, `a]c`, []Group{}},
{`a[^]b]c`, nil, `adc`, []Group{{0, 3}}},
{`\ba\b`, nil, `a-`, []Group{{0, 1}}},
{`\ba\b`, nil, `-a`, []Group{{1, 2}}},
{`\ba\b`, nil, `-a-`, []Group{{1, 2}}},
{`\by\b`, nil, `xy`, []Group{}},
{`\by\b`, nil, `yz`, []Group{}},
{`\by\b`, nil, `xyz`, []Group{}},
{`x\b`, nil, `xyz`, []Group{}},
{`x\B`, nil, `xyz`, []Group{{0, 1}}},
{`\Bz`, nil, `xyz`, []Group{{2, 3}}},
{`z\B`, nil, `xyz`, []Group{}},
{`\Bx`, nil, `xyz`, []Group{}},
{`\Ba\B`, nil, `a-`, []Group{}},
{`\Ba\B`, nil, `-a`, []Group{}},
{`\Ba\B`, nil, `-a-`, []Group{}},
{`\By\B`, nil, `xy`, []Group{}},
{`\By\B`, nil, `yz`, []Group{}},
{`\By\b`, nil, `xy`, []Group{{1, 2}}},
{`\by\B`, nil, `yz`, []Group{{0, 1}}},
{`\By\B`, nil, `xyz`, []Group{{1, 2}}},
{`ab|cd`, nil, `abc`, []Group{{0, 2}}},
{`ab|cd`, nil, `abcd`, []Group{{0, 2}, {2, 4}}},
{`$b`, nil, `b`, []Group{}},
{`a\(b`, nil, `a(b`, []Group{{0, 3}}},
{`a\(*b`, nil, `ab`, []Group{{0, 2}}},
{`a\(*b`, nil, `a((b`, []Group{{0, 4}}},
{`a\\b`, nil, `a\b`, []Group{{0, 3}}},
{`a+b+c`, nil, `aabbabc`, []Group{{4, 7}}},
{`a{1,}b{1,}c`, nil, `aabbabc`, []Group{{4, 7}}},
{`)(`, nil, `-`, nil},
{`[^ab]*`, nil, `cde`, []Group{{0, 3}, {3, 3}}},
{`abc`, nil, ``, []Group{}},
{`a*`, nil, ``, []Group{{0, 0}}},
{`a|b|c|d|e`, nil, `e`, []Group{{0, 1}}},
{`abcd*efg`, nil, `abcdefg`, []Group{{0, 7}}},
{`ab*`, nil, `xabyabbbz`, []Group{{1, 3}, {4, 8}}},
{`ab*`, nil, `xayabbbz`, []Group{{1, 2}, {3, 7}}},
{`[abhgefdc]ij`, nil, `hij`, []Group{{0, 3}}},
{`a[bcd]*dcdcde`, nil, `adcdcde`, []Group{{0, 7}}},
{`a[bcd]+dcdcde`, nil, `adcdcde`, []Group{}},
{`[a-zA-Z_][a-zA-Z0-9_]*`, nil, `alpha`, []Group{{0, 5}}},
{`multiple words of text`, nil, `uh-uh`, []Group{}},
{`multiple words`, nil, `multiple words, yeah`, []Group{{0, 14}}},
{`[k]`, nil, `ab`, []Group{}},
{`a[-]?c`, nil, `ac`, []Group{{0, 2}}},
{`^(.+)?B`, nil, `AB`, []Group{{0, 2}}},
{`\0009`, nil, "\x009", []Group{{0, 2}}},
{`\141`, nil, "a", []Group{{0, 1}}},
// At this point, the python test suite has a bunch
// of backreference tests. Since my engine doesn't
// implement backreferences, I've skipped those tests.
{`*a`, nil, ``, nil},
{`(*)b`, nil, ``, nil},
{`a**`, nil, ``, nil},
{`^`, nil, `abc`, []Group{{0, 0}}},
{`$`, nil, `abc`, []Group{{3, 3}}},
{`a[b-]`, nil, `a-`, []Group{{0, 2}}},
{`a[b-a]`, nil, `a-`, nil},
// Case-insensitive matching tests
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `XBC`, []Group{}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `AXC`, []Group{}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABX`, []Group{}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `XABCY`, []Group{{1, 4}}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABABC`, []Group{{2, 5}}},
{`ab*c`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`ab*bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`ab*bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBC`, []Group{{0, 4}}},
{`ab*bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab{0,}c`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab+bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBC`, []Group{{0, 4}}},
{`ab+bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{}},
{`ab+bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABQ`, []Group{}},
{`ab{1,}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABQ`, []Group{}},
{`ab+bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab{1,}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab{1,3}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab{3,4}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{{0, 6}}},
{`ab{4,5}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{}},
{`ab?bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBC`, []Group{{0, 4}}},
{`ab?bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`ab{0,1}bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`ab?bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBBC`, []Group{}},
{`ab?c`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`^abc$`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`^abc$`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCC`, []Group{}},
{`^abc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCC`, []Group{{0, 3}}},
{`^abc$`, []ReFlag{RE_CASE_INSENSITIVE}, `AABC`, []Group{}},
{`abc$`, []ReFlag{RE_CASE_INSENSITIVE}, `AABC`, []Group{{1, 4}}},
{`^`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 0}}},
{`$`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{3, 3}}},
{`a.c`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 3}}},
{`a.c`, []ReFlag{RE_CASE_INSENSITIVE}, `AXC`, []Group{{0, 3}}},
{`a.*c`, []ReFlag{RE_CASE_INSENSITIVE}, `AXYZC`, []Group{{0, 5}}},
{`a.*c`, []ReFlag{RE_CASE_INSENSITIVE}, `AXYZD`, []Group{}},
{`a[bc]d`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{}},
{`a[bc]d`, []ReFlag{RE_CASE_INSENSITIVE}, `ABD`, []Group{{0, 3}}},
{`a[b-d]e`, []ReFlag{RE_CASE_INSENSITIVE}, `ABD`, []Group{}},
{`a[b-d]e`, []ReFlag{RE_CASE_INSENSITIVE}, `ACE`, []Group{{0, 3}}},
{`a[b-d]`, []ReFlag{RE_CASE_INSENSITIVE}, `AAC`, []Group{{1, 3}}},
{`a[-b]`, []ReFlag{RE_CASE_INSENSITIVE}, `A-`, []Group{{0, 2}}}, // If a character class has a hyphen without a start or end character, it is treated as a literal hyphen
{`a[\-b]`, []ReFlag{RE_CASE_INSENSITIVE}, `A-`, []Group{{0, 2}}},
{`a[b-]`, []ReFlag{RE_CASE_INSENSITIVE}, `A-`, []Group{{0, 2}}}, // If a character class has a hyphen without a start or end character, it is treated as a literal hyphen
{`a[]b`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`a[`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`a\`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`abc)`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`(abc`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`a]`, []ReFlag{RE_CASE_INSENSITIVE}, `A]`, []Group{{0, 2}}},
{`a[]]b`, []ReFlag{RE_CASE_INSENSITIVE}, `A]B`, []Group{{0, 3}}},
{`a[\]]b`, []ReFlag{RE_CASE_INSENSITIVE}, `A]B`, []Group{{0, 3}}},
{`a[^bc]d`, []ReFlag{RE_CASE_INSENSITIVE}, `AED`, []Group{{0, 3}}},
{`a[^bc]d`, []ReFlag{RE_CASE_INSENSITIVE}, `ABD`, []Group{}},
{`a[^-b]c`, []ReFlag{RE_CASE_INSENSITIVE}, `ADC`, []Group{{0, 3}}},
{`a[^-b]c`, []ReFlag{RE_CASE_INSENSITIVE}, `A-C`, []Group{}},
{`a[^]b]c`, []ReFlag{RE_CASE_INSENSITIVE}, `A]C`, []Group{}},
{`a[^]b]c`, []ReFlag{RE_CASE_INSENSITIVE}, `ADC`, []Group{{0, 3}}},
{`\ba\b`, []ReFlag{RE_CASE_INSENSITIVE}, `A-`, []Group{{0, 1}}},
{`\ba\b`, []ReFlag{RE_CASE_INSENSITIVE}, `-A`, []Group{{1, 2}}},
{`\ba\b`, []ReFlag{RE_CASE_INSENSITIVE}, `-A-`, []Group{{1, 2}}},
{`\by\b`, []ReFlag{RE_CASE_INSENSITIVE}, `XY`, []Group{}},
{`\by\b`, []ReFlag{RE_CASE_INSENSITIVE}, `YZ`, []Group{}},
{`\by\b`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{}},
{`x\b`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{}},
{`x\B`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{{0, 1}}},
{`\Bz`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{{2, 3}}},
{`z\B`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{}},
{`\Bx`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{}},
{`\Ba\B`, []ReFlag{RE_CASE_INSENSITIVE}, `A-`, []Group{}},
{`\Ba\B`, []ReFlag{RE_CASE_INSENSITIVE}, `-A`, []Group{}},
{`\Ba\B`, []ReFlag{RE_CASE_INSENSITIVE}, `-A-`, []Group{}},
{`\By\B`, []ReFlag{RE_CASE_INSENSITIVE}, `XY`, []Group{}},
{`\By\B`, []ReFlag{RE_CASE_INSENSITIVE}, `YZ`, []Group{}},
{`\By\b`, []ReFlag{RE_CASE_INSENSITIVE}, `XY`, []Group{{1, 2}}},
{`\by\B`, []ReFlag{RE_CASE_INSENSITIVE}, `YZ`, []Group{{0, 1}}},
{`\By\B`, []ReFlag{RE_CASE_INSENSITIVE}, `XYZ`, []Group{{1, 2}}},
{`ab|cd`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Group{{0, 2}}},
{`ab|cd`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Group{{0, 2}, {2, 4}}},
{`$b`, []ReFlag{RE_CASE_INSENSITIVE}, `B`, []Group{}},
{`a\(b`, []ReFlag{RE_CASE_INSENSITIVE}, `A(B`, []Group{{0, 3}}},
{`a\(*b`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Group{{0, 2}}},
{`a\(*b`, []ReFlag{RE_CASE_INSENSITIVE}, `A((B`, []Group{{0, 4}}},
{`a\\b`, []ReFlag{RE_CASE_INSENSITIVE}, `A\B`, []Group{{0, 3}}},
{`a+b+c`, []ReFlag{RE_CASE_INSENSITIVE}, `AABBABC`, []Group{{4, 7}}},
{`a{1,}b{1,}c`, []ReFlag{RE_CASE_INSENSITIVE}, `AABBABC`, []Group{{4, 7}}},
{`)(`, []ReFlag{RE_CASE_INSENSITIVE}, `-`, nil},
{`[^ab]*`, []ReFlag{RE_CASE_INSENSITIVE}, `CDE`, []Group{{0, 3}, {3, 3}}},
{`abc`, []ReFlag{RE_CASE_INSENSITIVE}, ``, []Group{}},
{`a*`, []ReFlag{RE_CASE_INSENSITIVE}, ``, []Group{{0, 0}}},
{`a|b|c|d|e`, []ReFlag{RE_CASE_INSENSITIVE}, `E`, []Group{{0, 1}}},
{`abcd*efg`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDEFG`, []Group{{0, 7}}},
{`ab*`, []ReFlag{RE_CASE_INSENSITIVE}, `XABYABBBZ`, []Group{{1, 3}, {4, 8}}},
{`ab*`, []ReFlag{RE_CASE_INSENSITIVE}, `XAYABBBZ`, []Group{{1, 2}, {3, 7}}},
{`[abhgefdc]ij`, []ReFlag{RE_CASE_INSENSITIVE}, `HIJ`, []Group{{0, 3}}},
{`a[bcd]*dcdcde`, []ReFlag{RE_CASE_INSENSITIVE}, `ADCDCDE`, []Group{{0, 7}}},
{`a[bcd]+dcdcde`, []ReFlag{RE_CASE_INSENSITIVE}, `ADCDCDE`, []Group{}},
{`[a-zA-Z_][a-zA-Z0-9_]*`, []ReFlag{RE_CASE_INSENSITIVE}, `ALPHA`, []Group{{0, 5}}},
{`multiple words of text`, []ReFlag{RE_CASE_INSENSITIVE}, `UH-UH`, []Group{}},
{`multiple words`, []ReFlag{RE_CASE_INSENSITIVE}, `MULTIPLE WORDS, YEAH`, []Group{{0, 14}}},
{`[k]`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Group{}},
{`a[-]?c`, []ReFlag{RE_CASE_INSENSITIVE}, `AC`, []Group{{0, 2}}},
{`^(.+)?B`, []ReFlag{RE_CASE_INSENSITIVE}, `ab`, []Group{{0, 2}}},
{`\0009`, []ReFlag{RE_CASE_INSENSITIVE}, "\x009", []Group{{0, 2}}},
{`\141`, []ReFlag{RE_CASE_INSENSITIVE}, "A", []Group{{0, 1}}},
{`a[-]?c`, []ReFlag{RE_CASE_INSENSITIVE}, `AC`, []Group{{0, 2}}},
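// Lookahead tests (positive and negative)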
{`a(?!b).`, nil, `abad`, []Group{{2, 4}}},
{`a(?=d).`, nil, `abad`, []Group{{2, 4}}},
{`a(?=c|d).`, nil, `abad`, []Group{{2, 4}}},
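// Multiline (^ and $ per line) and single-line (dot matches newline) flag tests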
{`^abc`, nil, "jkl\nabc\nxyz", []Group{}},
{`^abc`, []ReFlag{RE_MULTILINE}, "jkl\nabc\nxyz", []Group{{4, 7}}},
{`abc$`, nil, "jkl\nabc\nxyz", []Group{}},
{`abc$`, []ReFlag{RE_MULTILINE}, "jkl\nabc\nxyz", []Group{{4, 7}}},
{`abc$`, []ReFlag{RE_MULTILINE}, "jkl\n123abc\nxyz", []Group{{7, 10}}},
{`^`, nil, "jkl\n123abc\nxyz", []Group{{0, 0}}},
{`^`, []ReFlag{RE_MULTILINE}, "jkl\n123abc\nxyz", []Group{{0, 0}, {4, 4}, {11, 11}}},
{`$`, nil, "jkl\n123abc\nxyz", []Group{{14, 14}}},
{`$`, []ReFlag{RE_MULTILINE}, "jkl\n123abc\nxyz", []Group{{3, 3}, {10, 10}, {14, 14}}},
{`a.b`, nil, "a\nb", []Group{}},
{`a.b`, []ReFlag{RE_SINGLE_LINE}, "a\nb", []Group{{0, 3}}},
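// Shorthand classes and escape-sequence tests (\w, \D, \x, control characters)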
{`\w+`, nil, `--ab_cd0123--`, []Group{{2, 11}}},
{`[\w]+`, nil, `--ab_cd0123--`, []Group{{2, 11}}},
{`\D+`, nil, `1234abc5678`, []Group{{4, 7}}},
{`[\D]+`, nil, `1234abc5678`, []Group{{4, 7}}},
{`[\D5]+`, nil, `1234abc5678`, []Group{{4, 8}}},
{`[\da-fA-F]+`, nil, `123abc`, []Group{{0, 6}}},
{`\xff`, nil, "\u00ff", []Group{{0, 1}}},
{`\xFF`, nil, "\u00ff", []Group{{0, 1}}},
{`\x00ff`, nil, "\u00ff", []Group{}},
{`\x{0000ff}`, nil, "\u00ff", []Group{{0, 1}}},
{`\x{0000FF}`, nil, "\u00ff", []Group{{0, 1}}},
{"\t\n\v\r\f\a", nil, "\t\n\v\r\f\a", []Group{{0, 6}}},
{`\t\n\v\r\f\a`, nil, "\t\n\v\r\f\a", []Group{{0, 6}}},
{`[\t][\n][\v][\r][\f][\b]`, nil, "\t\n\v\r\f\b", []Group{{0, 6}}},
{`.*d`, nil, "abc\nabd", []Group{{4, 7}}},
{`(`, nil, "-", nil},
{`[\41]`, nil, `!`, []Group{{0, 1}}},
{`(?<!abc)(d.f)`, nil, `abcdefdof`, []Group{{6, 9}}},
{`[\w-]+`, nil, `laser_beam`, []Group{{0, 10}}},
{`M+`, []ReFlag{RE_CASE_INSENSITIVE}, `MMM`, []Group{{0, 3}}},
{`m+`, []ReFlag{RE_CASE_INSENSITIVE}, `MMM`, []Group{{0, 3}}},
{`[M]+`, []ReFlag{RE_CASE_INSENSITIVE}, `MMM`, []Group{{0, 3}}},
{`[m]+`, []ReFlag{RE_CASE_INSENSITIVE}, `MMM`, []Group{{0, 3}}},
{`^*`, nil, `-`, nil},
{`a[^>]*b`, nil, `a>b`, []Group{}},
{`^a*$`, nil, `foo`, []Group{}},
// Out-of-bounds for character classes
{`[b-e]`, nil, `a`, []Group{}},
{`[b-e]`, nil, `f`, []Group{}},
{`*?`, nil, `-`, nil},
{`a*?`, nil, `-`, nil}, // non-greedy operators are not supported
// Numeric range tests - this is a feature I added; it doesn't exist in any
// other mainstream regex engine. See the example sketch after this table.
{`<0-255>`, nil, `0`, []Group{{0, 1}}},
{`<0-255>`, nil, `7`, []Group{{0, 1}}},
{`<0-255>`, nil, `46`, []Group{{0, 2}}},
{`<0-255>`, nil, `90`, []Group{{0, 2}}},
{`<0-255>`, nil, `107`, []Group{{0, 3}}},
{`<0-255>`, nil, `198`, []Group{{0, 3}}},
{`<0-255>`, nil, `254`, []Group{{0, 3}}},
{`<0-255>`, nil, `255`, []Group{{0, 3}}},
{`<0-255>`, nil, `256`, []Group{{0, 2}, {2, 3}}},
{`^<0-255>$`, nil, `256`, []Group{}},
{`^<0-299792458>$`, nil, `299000999`, []Group{{0, 9}}},
{`^<0-299792458>$`, nil, `299792531`, []Group{}},
{`^<3-0>$`, nil, `-`, nil},
{`^<0-0>$`, nil, `0`, []Group{{0, 1}}},
{`2<0-55>`, nil, `231`, []Group{{0, 3}}},
{`2<0-55>`, nil, `271`, []Group{{0, 2}}},
{`^2<0-55>$`, nil, `271`, []Group{}},
{`<389`, nil, `-`, nil},
{`<389>`, nil, `-`, nil},
{`<-389>`, nil, `-`, nil},
{`<389->`, nil, `-`, nil},
{`<389-400`, nil, `-`, nil},
{`<389-400>`, nil, `391`, []Group{{0, 3}}},
{`\b<1-10000>\b`, nil, `America declared independence in 1776.`, []Group{{33, 37}}},
}
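// Illustrative sketch of the numeric-range syntax, using the same API the
// tests above exercise (Compile, FindAllMatches, and the Group indices).
// The expected output mirrors the `<0-255>` / `256` entry in reTests:
// "256" is out of range, so the longest in-range prefix "25" matches first,
// followed by "6".
func ExampleFindAllMatches_numericRange() {
	regComp, err := Compile(`<0-255>`)
	if err != nil {
		panic(err)
	}
	for _, m := range FindAllMatches(regComp, "256") {
		fmt.Println(m[0].StartIdx, m[0].EndIdx) // m[0] is the whole match
	}
	// Output:
	// 0 2
	// 2 3
}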
var groupTests = []struct {
re string
flags []ReFlag
str string
result []Match
}{
{"(a)(b)", nil, "ab", []Match{[]Group{{0, 2}, {0, 1}, {1, 2}}}},
{"((a))(b)", nil, "ab", []Match{[]Group{{0, 2}, {0, 1}, {0, 1}, {1, 2}}}},
{"(0)", nil, "ab", []Match{[]Group{}}},
{"(a)b", nil, "ab", []Match{[]Group{{0, 2}, {0, 1}}}},
{"a(b)", nil, "ab", []Match{[]Group{{0, 2}, {1, 2}}}},
{"(a|b)", nil, "ab", []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 2}, {1, 2}}}},
{"(a)|(b)", nil, "ab", []Match{[]Group{{0, 1}, {0, 1}, {-1, -1}}, []Group{{1, 2}, {-1, -1}, {1, 2}}}},
{"(a+)(a)", nil, "aaaa", []Match{[]Group{{0, 4}, {0, 3}, {3, 4}}}},
{"(a+)|(a)", nil, "aaaa", []Match{[]Group{{0, 4}, {0, 4}, {-1, -1}}}},
{"(a+)(aa)", nil, "aaaa", []Match{[]Group{{0, 4}, {0, 2}, {2, 4}}}},
{"(aaaa)|(aaaa)", nil, "aaaa", []Match{[]Group{{0, 4}, {0, 4}, {-1, -1}}}},
{"(aaa)|(aaaa)", nil, "aaaa", []Match{[]Group{{0, 4}, {-1, -1}, {0, 4}}}},
{"(aaa)|(aaaa)", nil, "aaaa", []Match{[]Group{{0, 4}, {-1, -1}, {0, 4}}}},
{"(aaaa)|(aaa)", nil, "aaaa", []Match{[]Group{{0, 4}, {0, 4}, {-1, -1}}}},
{"(a)|(aa)", nil, "aa", []Match{[]Group{{0, 2}, {-1, -1}, {0, 2}}}},
{"(a?)a?", nil, "b", []Match{[]Group{{0, 0}, {0, 0}}, []Group{{1, 1}, {1, 1}}}},
{"(a?)a?", nil, "ab", []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 1}, {1, 1}}, []Group{{2, 2}, {2, 2}}}},
{"(a?)a?", nil, "aa", []Match{[]Group{{0, 2}, {0, 1}}, []Group{{2, 2}, {2, 2}}}},
{"a((b.d){3})", nil, "abfdbhdbid", []Match{[]Group{{0, 10}, {1, 10}, {7, 10}}}},
// Test cases from Python's RE test suite
{`(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\071`, nil, `abcdefghijkl9`, []Match{[]Group{{0, 13}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9, 10}, {10, 11}, {11, 12}}}},
{`()ef`, nil, `def`, []Match{[]Group{{1, 3}, {1, 1}}}},
{`(?:)ef`, nil, `def`, []Match{[]Group{{1, 3}}}},
{`(?:)`, nil, `def`, []Match{[]Group{{0, 0}}, []Group{{1, 1}}, []Group{{2, 2}}, []Group{{3, 3}}}},
{`((a))`, nil, `abc`, []Match{[]Group{{0, 1}, {0, 1}, {0, 1}}}},
{`(a)b(c)`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 1}, {2, 3}}}},
{`(a+|b)*`, nil, `ab`, []Match{[]Group{{0, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b){0,}`, nil, `ab`, []Match{[]Group{{0, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b)+`, nil, `ab`, []Match{[]Group{{0, 2}, {1, 2}}}},
{`(a+|b){1,}`, nil, `ab`, []Match{[]Group{{0, 2}, {1, 2}}}},
{`(a+|b)?`, nil, `ab`, []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b){0,1}`, nil, `ab`, []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a|b|c|d|e)f`, nil, `ef`, []Match{[]Group{{0, 2}, {0, 1}}}},
{`(ab|cd)e`, nil, `abcde`, []Match{[]Group{{2, 5}, {2, 4}}}},
{`^(ab|cd)e`, nil, `abcde`, []Match{}},
{`(abc|)ef`, nil, `abcdef`, []Match{[]Group{{4, 6}, {4, 4}}}},
{`(a|b)c*d`, nil, `abcd`, []Match{[]Group{{1, 4}, {1, 2}}}},
{`(ab|ab*)bc`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 1}}}},
{`a([bc]*)c*`, nil, `abc`, []Match{[]Group{{0, 3}, {1, 3}}}},
{`a([bc]*)(c*d)`, nil, `abcd`, []Match{[]Group{{0, 4}, {1, 3}, {3, 4}}}},
{`a([bc]+)(c*d)`, nil, `abcd`, []Match{[]Group{{0, 4}, {1, 3}, {3, 4}}}},
{`a([bc]*)(c+d)`, nil, `abcd`, []Match{[]Group{{0, 4}, {1, 2}, {2, 4}}}},
{`(ab|a)b*c`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 2}}}},
{`((a)(b)c)(d)`, nil, `abcd`, []Match{[]Group{{0, 4}, {0, 3}, {0, 1}, {1, 2}, {3, 4}}}},
{`^a(bc+|b[eh])g|.h$`, nil, `abh`, []Match{[]Group{{1, 3}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, nil, `effgz`, []Match{[]Group{{0, 5}, {0, 5}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, nil, `ij`, []Match{[]Group{{0, 2}, {0, 2}, {1, 2}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, nil, `effg`, []Match{}},
{`(bc+d$|ef*g.|h?i(j|k))`, nil, `bcdd`, []Match{}},
{`(bc+d$|ef*g.|h?i(j|k))`, nil, `reffgz`, []Match{[]Group{{1, 6}, {1, 6}}}},
{`(((((((((a)))))))))`, nil, `a`, []Match{[]Group{{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}}},
{`(((((((((a)))))))))\41`, nil, `a`, []Match{[]Group{{0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}}},
{`(.*)c(.*)`, nil, `abcde`, []Match{[]Group{{0, 5}, {0, 2}, {3, 5}}}},
{`\((.*), (.*)\)`, nil, `(a, b)`, []Match{[]Group{{0, 6}, {1, 2}, {4, 5}}}},
// At this point, the Python test suite has a bunch
// of backreference tests. Since my engine doesn't
// implement backreferences, I've skipped those tests.
{`(a)(b)c|ab`, nil, `ab`, []Match{[]Group{{0, 2}}}},
{`(a)+x`, nil, `aaax`, []Match{[]Group{{0, 4}, {2, 3}}}},
{`([ac])+x`, nil, `aacx`, []Match{[]Group{{0, 4}, {2, 3}}}},
{`([^/]*/)*sub1/`, nil, `d:msgs/tdir/sub1/trial/away.cpp`, []Match{[]Group{{0, 17}, {7, 12}}}},
{`([^.]*)\.([^:]*):[T ]+(.*)`, nil, `track1.title:TBlah blah blah`, []Match{[]Group{{0, 28}, {0, 6}, {7, 12}, {14, 28}}}},
{`([^N]*N)+`, nil, `abNNxyzN`, []Match{[]Group{{0, 8}, {4, 8}}}},
{`([^N]*N)+`, nil, `abNNxyz`, []Match{[]Group{{0, 4}, {3, 4}}}},
{`([abc]*)x`, nil, `abcx`, []Match{[]Group{{0, 4}, {0, 3}}}},
{`([abc]*)x`, nil, `abc`, []Match{}},
{`([xyz]*)x`, nil, `abcx`, []Match{[]Group{{3, 4}, {3, 3}}}},
{`(a)+b|aac`, nil, `aac`, []Match{[]Group{{0, 3}}}},
{`([abc])*d`, nil, `abbbcd`, []Match{[]Group{{0, 6}, {4, 5}}}},
{`([abc])*bcd`, nil, `abcd`, []Match{[]Group{{0, 4}, {0, 1}}}},
{`^(ab|cd)e`, nil, `abcde`, []Match{}},
// Case-insensitive tests
{`(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\071`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDEFGHIJKL9`, []Match{[]Group{{0, 13}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9, 10}, {10, 11}, {11, 12}}}},
{`()ef`, []ReFlag{RE_CASE_INSENSITIVE}, `DEF`, []Match{[]Group{{1, 3}, {1, 1}}}},
{`(?:)ef`, []ReFlag{RE_CASE_INSENSITIVE}, `DEF`, []Match{[]Group{{1, 3}}}},
{`(?:)`, []ReFlag{RE_CASE_INSENSITIVE}, `DEF`, []Match{[]Group{{0, 0}}, []Group{{1, 1}}, []Group{{2, 2}}, []Group{{3, 3}}}},
{`((a))`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{[]Group{{0, 1}, {0, 1}, {0, 1}}}},
{`(a)b(c)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{[]Group{{0, 3}, {0, 1}, {2, 3}}}},
{`(a+|b)*`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b){0,}`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b)+`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 2}, {1, 2}}}},
{`(a+|b){1,}`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 2}, {1, 2}}}},
{`(a+|b)?`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a+|b){0,1}`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 2}, {1, 2}}, []Group{{2, 2}}}},
{`(a|b|c|d|e)f`, []ReFlag{RE_CASE_INSENSITIVE}, `EF`, []Match{[]Group{{0, 2}, {0, 1}}}},
{`(ab|cd)e`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDE`, []Match{[]Group{{2, 5}, {2, 4}}}},
{`^(ab|cd)e`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDE`, []Match{}},
{`(abc|)ef`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDEF`, []Match{[]Group{{4, 6}, {4, 4}}}},
{`(a|b)c*d`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{1, 4}, {1, 2}}}},
{`(ab|ab*)bc`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{[]Group{{0, 3}, {0, 1}}}},
{`a([bc]*)c*`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{[]Group{{0, 3}, {1, 3}}}},
{`a([bc]*)(c*d)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{0, 4}, {1, 3}, {3, 4}}}},
{`a([bc]+)(c*d)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{0, 4}, {1, 3}, {3, 4}}}},
{`a([bc]*)(c+d)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{0, 4}, {1, 2}, {2, 4}}}},
{`(ab|a)b*c`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{[]Group{{0, 3}, {0, 2}}}},
{`((a)(b)c)(d)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{0, 4}, {0, 3}, {0, 1}, {1, 2}, {3, 4}}}},
{`^a(bc+|b[eh])g|.h$`, []ReFlag{RE_CASE_INSENSITIVE}, `ABH`, []Match{[]Group{{1, 3}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, []ReFlag{RE_CASE_INSENSITIVE}, `EFFGZ`, []Match{[]Group{{0, 5}, {0, 5}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, []ReFlag{RE_CASE_INSENSITIVE}, `IJ`, []Match{[]Group{{0, 2}, {0, 2}, {1, 2}}}},
{`(bc+d$|ef*g.|h?i(j|k))`, []ReFlag{RE_CASE_INSENSITIVE}, `EFFG`, []Match{}},
{`(bc+d$|ef*g.|h?i(j|k))`, []ReFlag{RE_CASE_INSENSITIVE}, `BCDD`, []Match{}},
{`(bc+d$|ef*g.|h?i(j|k))`, []ReFlag{RE_CASE_INSENSITIVE}, `reffgz`, []Match{[]Group{{1, 6}, {1, 6}}}},
{`(((((((((a)))))))))`, []ReFlag{RE_CASE_INSENSITIVE}, `A`, []Match{[]Group{{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}}},
{`(((((((((a)))))))))\41`, []ReFlag{RE_CASE_INSENSITIVE}, `A`, []Match{[]Group{{0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}}}},
{`(.*)c(.*)`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDE`, []Match{[]Group{{0, 5}, {0, 2}, {3, 5}}}},
{`\((.*), (.*)\)`, []ReFlag{RE_CASE_INSENSITIVE}, `(A, B)`, []Match{[]Group{{0, 6}, {1, 2}, {4, 5}}}},
{`(a)(b)c|ab`, []ReFlag{RE_CASE_INSENSITIVE}, `AB`, []Match{[]Group{{0, 2}}}},
{`(a)+x`, []ReFlag{RE_CASE_INSENSITIVE}, `AAAX`, []Match{[]Group{{0, 4}, {2, 3}}}},
{`([ac])+x`, []ReFlag{RE_CASE_INSENSITIVE}, `AACX`, []Match{[]Group{{0, 4}, {2, 3}}}},
{`([^/]*/)*sub1/`, []ReFlag{RE_CASE_INSENSITIVE}, `D:MSGS/TDIR/SUB1/TRIAL/AWAY.CPP`, []Match{[]Group{{0, 17}, {7, 12}}}},
{`([^.]*)\.([^:]*):[T ]+(.*)`, []ReFlag{RE_CASE_INSENSITIVE}, `TRACK1.TITLE:TBLAH BLAH BLAH`, []Match{[]Group{{0, 28}, {0, 6}, {7, 12}, {14, 28}}}},
{`([^N]*N)+`, []ReFlag{RE_CASE_INSENSITIVE}, `ABNNXYZN`, []Match{[]Group{{0, 8}, {4, 8}}}},
{`([^N]*N)+`, []ReFlag{RE_CASE_INSENSITIVE}, `ABNNXYZ`, []Match{[]Group{{0, 4}, {3, 4}}}},
{`([abc]*)x`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCX`, []Match{[]Group{{0, 4}, {0, 3}}}},
{`([abc]*)x`, []ReFlag{RE_CASE_INSENSITIVE}, `ABC`, []Match{}},
{`([xyz]*)x`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCX`, []Match{[]Group{{3, 4}, {3, 3}}}},
{`(a)+b|aac`, []ReFlag{RE_CASE_INSENSITIVE}, `AAC`, []Match{[]Group{{0, 3}}}},
{`([abc])*d`, []ReFlag{RE_CASE_INSENSITIVE}, `ABBBCD`, []Match{[]Group{{0, 6}, {4, 5}}}},
{`([abc])*bcd`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCD`, []Match{[]Group{{0, 4}, {0, 1}}}},
{`^(ab|cd)e`, []ReFlag{RE_CASE_INSENSITIVE}, `ABCDE`, []Match{}},
{`(?:(?:(?:(?:(?:(?:a))))))`, nil, `a`, []Match{[]Group{{0, 1}}}},
{`a(?:b|c|d)(.)`, nil, `ace`, []Match{[]Group{{0, 3}, {2, 3}}}},
{`a(?:b|c|d)*(.)`, nil, `ace`, []Match{[]Group{{0, 3}, {2, 3}}}},
{`a(?:b|c|d)+(.)`, nil, `ace`, []Match{[]Group{{0, 3}, {2, 3}}}},
{`a(?:b|(c|e){1,2}?|d)+(.)`, nil, `ace`, []Match{[]Group{{0, 3}, {1, 2}, {2, 3}}}},
{`(?<!-):(.*)(?<!-):`, nil, `a:bc-:de:f`, []Match{[]Group{{1, 9}, {2, 8}}}},
{`(?<!\\):(.*)(?<!\\):`, nil, `a:bc\:de:f`, []Match{[]Group{{1, 9}, {2, 8}}}},
{`(?<!\?)'(.*)(?<!\?)'`, nil, `a'bc?'de'f`, []Match{[]Group{{1, 9}, {2, 8}}}},
{`([\s]*)([\S]*)([\s]*)`, nil, ` testing!1972`, []Match{[]Group{{0, 13}, {0, 1}, {1, 13}, {13, 13}}, []Group{{13, 13}, {13, 13}, {13, 13}, {13, 13}}}},
{`(\s*)(\S*)(\s*)`, nil, ` testing!1972`, []Match{[]Group{{0, 13}, {0, 1}, {1, 13}, {13, 13}}, []Group{{13, 13}, {13, 13}, {13, 13}, {13, 13}}}},
{`(([a-z]+):)?([a-z]+)$`, nil, `smil`, []Match{[]Group{{0, 4}, {-1, -1}, {-1, -1}, {0, 4}}}},
{`(x?)?`, nil, `x`, []Match{[]Group{{0, 1}, {0, 1}}, []Group{{1, 1}, {1, 1}}}},
{`"(?:\\"|[^"])*"`, nil, `"\""`, []Match{[]Group{{0, 4}}}},
{`^((a)c)?(ab)$`, nil, `ab`, []Match{[]Group{{0, 2}, {-1, -1}, {-1, -1}, {0, 2}}}},
{`^([ab]*)(?=(b)?)c`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 2}}}},
{`^([ab]*)(?!(b))c`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 2}}}},
{`^([ab]*)(?<!(a))c`, nil, `abc`, []Match{[]Group{{0, 3}, {0, 2}}}},
{`(<389-400>)`, nil, `391`, []Match{[]Group{{0, 3}, {0, 3}}}},
}
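// Illustrative sketch of capture-group indices: group 0 is the whole match and
// groups 1..n follow the order of opening parentheses. The values mirror the
// `(a)b(c)` entry in groupTests above.
func ExampleFindAllMatches_groups() {
	regComp, err := Compile(`(a)b(c)`)
	if err != nil {
		panic(err)
	}
	for _, m := range FindAllMatches(regComp, "abc") {
		fmt.Println(m[0].StartIdx, m[0].EndIdx) // whole match
		fmt.Println(m[1].StartIdx, m[1].EndIdx) // group 1: (a)
		fmt.Println(m[2].StartIdx, m[2].EndIdx) // group 2: (c)
	}
	// Output:
	// 0 3
	// 0 1
	// 2 3
}
// TestFindAllMatches compares only the whole-match (group 0) indices returned
// by FindAllMatches against the expected groups in the reTests table.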
func TestFindAllMatches(t *testing.T) {
for _, test := range reTests {
t.Run(test.re+" "+test.str, func(t *testing.T) {
regComp, err := Compile(test.re, test.flags...)
if err != nil {
if test.result != nil {
panic(fmt.Errorf("Test Error: %v", err))
}
} else {
matchIndices := FindAllMatches(regComp, test.str)
zeroGroups := make([]Group, len(matchIndices))
for i, m := range matchIndices {
zeroGroups[i] = m[0]
}
if !slices.Equal(test.result, zeroGroups) {
t.Errorf("Wanted %v Got %v\n", test.result, zeroGroups)
}
}
})
}
}
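// TestFindString checks that FindString returns the text of the first match
// (or the empty string when the pattern does not match), using the same
// reTests table.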
func TestFindString(t *testing.T) {
for _, test := range reTests {
t.Run(test.re+" "+test.str, func(t *testing.T) {
regComp, err := Compile(test.re, test.flags...)
if err != nil {
if test.result != nil {
panic(err)
}
} else {
foundString := FindString(regComp, test.str)
if len(test.result) == 0 {
if foundString != "" {
t.Errorf("Expected no match got %v\n", foundString)
}
} else {
expectedString := test.str[test.result[0].StartIdx:test.result[0].EndIdx]
if foundString != expectedString {
t.Errorf("Wanted %v Got %v\n", expectedString, foundString)
}
}
}
})
}
}
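// TestFindAllGroups compares capture-group indices against the groupTests
// table; only groups that the engine reports as valid are checked.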
func TestFindAllGroups(t *testing.T) {
for _, test := range groupTests {
t.Run(test.re+" "+test.str, func(t *testing.T) {
regComp, err := Compile(test.re, test.flags...)
if err != nil {
if test.result != nil {
panic(err)
}
// The pattern is expected to fail compilation; skip the matching checks.
return
}
matchIndices := FindAllMatches(regComp, test.str)
for i := range matchIndices {
for j := range matchIndices[i] {
if matchIndices[i][j].isValid() {
if test.result[i][j] != matchIndices[i][j] {
t.Errorf("Wanted %v Got %v\n", test.result, matchIndices)
}
}
}
}
})
}
}

670
regex/re_tests.py Normal file
View File

@@ -0,0 +1,670 @@
#!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
# test common prefix
('Python|Perl', 'Perl'), # Alternation
('(Python|Perl)', 'Perl'), # Grouped alternation
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookahead
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]
u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])

516
regex/re_tests_uniq.py Normal file
View File

@@ -0,0 +1,516 @@
#!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
# test common prefix
('Python|Perl', 'Perl'), # Alternation
('(Python|Perl)', 'Perl'), # Grouped alternation
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
]
# Test suite (for verifying correctness)
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
# All tests from Perl
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abq', FAIL),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('^(ab|cd)e', 'abcde', FAIL),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookbehind
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]

u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])

28
regex/sliceQueue.go Normal file

@@ -0,0 +1,28 @@
package regex

import "errors"

// Helper functions for slices, to make them behave more like stacks:
// the end of the slice is treated as the top of the stack.

// peek returns the top element without removing it, or an error if the stack is empty.
func peek[T any](s []T) (T, error) {
	if len(s) < 1 {
		return *new(T), errors.New("stack empty")
	}
	return s[len(s)-1], nil
}

// mustPop pops the top element, panicking if the stack is empty.
func mustPop[T any](sp *[]T) T {
	val, err := pop(sp)
	if err != nil {
		panic(err)
	}
	return val
}

// pop removes and returns the top element, or an error if the stack is empty.
func pop[T any](sp *[]T) (T, error) {
	if len(*sp) < 1 {
		return *new(T), errors.New("stack empty")
	}
	toReturn := (*sp)[len(*sp)-1]
	*sp = (*sp)[:len(*sp)-1]
	return toReturn, nil
}
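The helpers above treat the end of a slice as the top of a stack: push by appending, peek or pop from the tail. A minimal usage sketch follows; the stackDemo function and the values in it are illustrative only, not part of the commit.

package regex

import "fmt"

// stackDemo is an illustrative sketch showing the slice helpers used as a stack.
func stackDemo() {
	var stack []int
	stack = append(stack, 1, 2, 3) // push by appending

	top, _ := peek(stack) // inspect the top without removing it
	fmt.Println(top)      // 3

	fmt.Println(mustPop(&stack)) // 3 (removed; would panic on an empty stack)
	fmt.Println(mustPop(&stack)) // 2
	fmt.Println(mustPop(&stack)) // 1

	if _, err := pop(&stack); err != nil {
		fmt.Println(err) // "stack empty"
	}
}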

19
regex/stateContents.go Normal file

@@ -0,0 +1,19 @@
package regex

// stateContents represents the contents of the current state. Character classes
// can have multiple contents, which is why it is represented as a slice.
type stateContents []int

// newContents creates a stateContents value from the given ints.
func newContents(data ...int) stateContents {
	toReturn := stateContents{}
	for _, i := range data {
		toReturn = append(toReturn, i)
	}
	return toReturn
}

// rune2Contents converts a rune slice into a stateContents value.
// The runes are simply cast to ints.
func rune2Contents(data []rune) stateContents {
	toReturn := newContents()
	for _, r := range data {
		toReturn = append(toReturn, int(r))
	}
	return toReturn
}
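Because the conversion is a plain cast from rune to int, the contents of a state built from a character class are just the code points of its members. A small illustrative sketch; contentsDemo is an assumed name, not part of the commit.

package regex

import "fmt"

// contentsDemo is an illustrative sketch showing that rune2Contents stores code points.
func contentsDemo() {
	fmt.Println(rune2Contents([]rune("ab"))) // [97 98]
	fmt.Println(newContents('a', 'b'))       // [97 98], built directly from ints
}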

7
regex/todo.txt Normal file

@@ -0,0 +1,7 @@
1. Error-checking - invalid regexes, out-of-bounds accesses. Do more testing.
2. More features - non-greedy operators, backreferences, non-capturing groups, escaped characters inside character classes
3. Fix adding concatenation operators in the shunting-yard function (very janky - it compares against operators individually)
4. Refactor code for flags - make each flag's code a function that modifies the result of findAllMatches (see the sketch below)
Ideas for flags:
-m <num> : Print the <num>th match (-m 1 = first match, -m 2 = second match)
-g <num> : Print the <num>th group
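A hedged sketch of the refactor in item 4, assuming each flag becomes a function applied to the output of findAllMatches; the matchFilter type and the onlyNthMatch/applyFilters names are illustrative assumptions, not existing code.

package regex

// matchFilter is an assumed shape for a flag's behaviour: it takes the matches
// returned by findAllMatches and returns a transformed slice.
type matchFilter func([]Match) []Match

// onlyNthMatch sketches the proposed -m <num> flag (1-indexed): keep only the nth match.
func onlyNthMatch(n int) matchFilter {
	return func(matches []Match) []Match {
		if n < 1 || n > len(matches) {
			return nil
		}
		return matches[n-1 : n]
	}
}

// applyFilters runs each enabled flag's function over the match list in order.
func applyFilters(matches []Match, filters ...matchFilter) []Match {
	for _, f := range filters {
		matches = f(matches)
	}
	return matches
}

Under this shape, the -g flag would just be another matchFilter, and the code that prints matches would not need to change.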