Merge "Merge remote-tracking branch 'aosp/upstream-master' into up-shaderc2"
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a671fe2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+*.pyc
+*.orig
+core
+obj/
+benchlog.*
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..0ddc4a9
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,170 @@
+language: cpp
+sudo: false
+dist: trusty
+script:
+  - make
+  - make test
+matrix:
+  include:
+
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-4.8
+      env:
+         - MATRIX_EVAL="CC=gcc-4.8 CXX=g++-4.8"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-4.9
+      env:
+         - MATRIX_EVAL="CC=gcc-4.9 CXX=g++-4.9"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-5
+      env:
+         - MATRIX_EVAL="CC=gcc-5 CXX=g++-5"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-6
+      env:
+        - MATRIX_EVAL="CC=gcc-6 CXX=g++-6"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-7
+      env:
+        - MATRIX_EVAL="CC=gcc-7 CXX=g++-7"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+          packages:
+            - g++-8
+      env:
+        - MATRIX_EVAL="CC=gcc-8 CXX=g++-8"
+
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-precise-3.5
+          packages:
+            - clang-3.5
+      env:
+        - MATRIX_EVAL="CC=clang-3.5 CXX=clang++-3.5"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-precise-3.6
+          packages:
+            - clang-3.6
+      env:
+        - MATRIX_EVAL="CC=clang-3.6 CXX=clang++-3.6"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-precise-3.7
+          packages:
+            - clang-3.7
+      env:
+        - MATRIX_EVAL="CC=clang-3.7 CXX=clang++-3.7"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-precise-3.8
+          packages:
+            - clang-3.8
+      env:
+        - MATRIX_EVAL="CC=clang-3.8 CXX=clang++-3.8"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-precise-3.9
+          packages:
+            - clang-3.9
+      env:
+        - MATRIX_EVAL="CC=clang-3.9 CXX=clang++-3.9"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-trusty-4.0
+          packages:
+            - clang-4.0
+      env:
+        - MATRIX_EVAL="CC=clang-4.0 CXX=clang++-4.0"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - llvm-toolchain-trusty-5.0
+          packages:
+            - clang-5.0
+      env:
+        - MATRIX_EVAL="CC=clang-5.0 CXX=clang++-5.0"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - sourceline: 'deb https://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main'
+              key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+          packages:
+            - clang-6.0
+      env:
+        - MATRIX_EVAL="CC=clang-6.0 CXX=clang++-6.0"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - sourceline: 'deb https://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main'
+              key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+          packages:
+            - clang-7
+      env:
+        - MATRIX_EVAL="CC=clang-7 CXX=clang++-7"
+    - os: linux
+      addons:
+        apt:
+          sources:
+            - ubuntu-toolchain-r-test
+            - sourceline: 'deb https://apt.llvm.org/trusty/ llvm-toolchain-trusty-8 main'
+              key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+          packages:
+            - clang-8
+      env:
+        - MATRIX_EVAL="CC=clang-8 CXX=clang++-8"
+
+before_install:
+  - eval "${MATRIX_EVAL}"
diff --git a/AUTHORS b/AUTHORS
index 3c0f928..0754006 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -9,4 +9,5 @@
 # Please keep the list sorted.
 
 Google Inc.
+Samsung Electronics
 Stefano Rivera <stefano.rivera@gmail.com>
diff --git a/BUILD b/BUILD
new file mode 100644
index 0000000..30ce320
--- /dev/null
+++ b/BUILD
@@ -0,0 +1,239 @@
+# Copyright 2009 The RE2 Authors.  All Rights Reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Bazel (http://bazel.io/) BUILD file for RE2.
+
+licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+config_setting(
+    name = "darwin",
+    values = {"cpu": "darwin"},
+)
+
+config_setting(
+    name = "windows",
+    values = {"cpu": "x64_windows"},
+)
+
+config_setting(
+    name = "windows_msvc",
+    values = {"cpu": "x64_windows_msvc"},
+)
+
+cc_library(
+    name = "re2",
+    srcs = [
+        "re2/bitmap256.h",
+        "re2/bitstate.cc",
+        "re2/compile.cc",
+        "re2/dfa.cc",
+        "re2/filtered_re2.cc",
+        "re2/mimics_pcre.cc",
+        "re2/nfa.cc",
+        "re2/onepass.cc",
+        "re2/parse.cc",
+        "re2/perl_groups.cc",
+        "re2/prefilter.cc",
+        "re2/prefilter.h",
+        "re2/prefilter_tree.cc",
+        "re2/prefilter_tree.h",
+        "re2/prog.cc",
+        "re2/prog.h",
+        "re2/re2.cc",
+        "re2/regexp.cc",
+        "re2/regexp.h",
+        "re2/set.cc",
+        "re2/simplify.cc",
+        "re2/stringpiece.cc",
+        "re2/tostring.cc",
+        "re2/unicode_casefold.cc",
+        "re2/unicode_casefold.h",
+        "re2/unicode_groups.cc",
+        "re2/unicode_groups.h",
+        "re2/walker-inl.h",
+        "util/flags.h",
+        "util/logging.h",
+        "util/mix.h",
+        "util/mutex.h",
+        "util/pod_array.h",
+        "util/rune.cc",
+        "util/sparse_array.h",
+        "util/sparse_set.h",
+        "util/strutil.cc",
+        "util/strutil.h",
+        "util/utf.h",
+        "util/util.h",
+    ],
+    hdrs = [
+        "re2/filtered_re2.h",
+        "re2/re2.h",
+        "re2/set.h",
+        "re2/stringpiece.h",
+    ],
+    copts = select({
+        ":windows": [],
+        ":windows_msvc": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    linkopts = select({
+        # Darwin doesn't need `-pthread' when linking and it appears that
+        # older versions of Clang will warn about the unused command line
+        # argument, so just don't pass it.
+        ":darwin": [],
+        ":windows": [],
+        ":windows_msvc": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "testing",
+    testonly = 1,
+    srcs = [
+        "re2/testing/backtrack.cc",
+        "re2/testing/dump.cc",
+        "re2/testing/exhaustive_tester.cc",
+        "re2/testing/null_walker.cc",
+        "re2/testing/regexp_generator.cc",
+        "re2/testing/string_generator.cc",
+        "re2/testing/tester.cc",
+        "util/pcre.cc",
+    ],
+    hdrs = [
+        "re2/testing/exhaustive_tester.h",
+        "re2/testing/regexp_generator.h",
+        "re2/testing/string_generator.h",
+        "re2/testing/tester.h",
+        "util/benchmark.h",
+        "util/pcre.h",
+        "util/test.h",
+    ],
+    deps = [":re2"],
+)
+
+cc_library(
+    name = "test",
+    testonly = 1,
+    srcs = ["util/test.cc"],
+    deps = [":testing"],
+)
+
+load(":re2_test.bzl", "re2_test")
+
+re2_test(
+    "charclass_test",
+    size = "small",
+)
+
+re2_test(
+    "compile_test",
+    size = "small",
+)
+
+re2_test(
+    "filtered_re2_test",
+    size = "small",
+)
+
+re2_test(
+    "mimics_pcre_test",
+    size = "small",
+)
+
+re2_test(
+    "parse_test",
+    size = "small",
+)
+
+re2_test(
+    "possible_match_test",
+    size = "small",
+)
+
+re2_test(
+    "re2_arg_test",
+    size = "small",
+)
+
+re2_test(
+    "re2_test",
+    size = "small",
+)
+
+re2_test(
+    "regexp_test",
+    size = "small",
+)
+
+re2_test(
+    "required_prefix_test",
+    size = "small",
+)
+
+re2_test(
+    "search_test",
+    size = "small",
+)
+
+re2_test(
+    "set_test",
+    size = "small",
+)
+
+re2_test(
+    "simplify_test",
+    size = "small",
+)
+
+re2_test(
+    "string_generator_test",
+    size = "small",
+)
+
+re2_test(
+    "dfa_test",
+    size = "large",
+)
+
+re2_test(
+    "exhaustive1_test",
+    size = "large",
+)
+
+re2_test(
+    "exhaustive2_test",
+    size = "large",
+)
+
+re2_test(
+    "exhaustive3_test",
+    size = "large",
+)
+
+re2_test(
+    "exhaustive_test",
+    size = "large",
+)
+
+re2_test(
+    "random_test",
+    size = "large",
+)
+
+cc_library(
+    name = "benchmark",
+    testonly = 1,
+    srcs = ["util/benchmark.cc"],
+    deps = [":testing"],
+)
+
+cc_binary(
+    name = "regexp_benchmark",
+    testonly = 1,
+    srcs = ["re2/testing/regexp_benchmark.cc"],
+    deps = [":benchmark"],
+)
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..f87c4ff
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,150 @@
+# Copyright 2015 The RE2 Authors.  All Rights Reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Old enough to support Ubuntu Trusty.
+cmake_minimum_required(VERSION 2.8.12)
+
+if(POLICY CMP0048)
+  cmake_policy(SET CMP0048 NEW)
+endif()
+
+project(RE2 CXX)
+include(CTest)
+
+option(BUILD_SHARED_LIBS "build shared libraries" OFF)
+option(USEPCRE "use PCRE in tests and benchmarks" OFF)
+
+# CMake seems to have no way to enable/disable testing per subproject,
+# so we provide an option similar to BUILD_TESTING, but just for RE2.
+option(RE2_BUILD_TESTING "enable testing for RE2" ON)
+
+set(EXTRA_TARGET_LINK_LIBRARIES)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
+  if(MSVC_VERSION LESS 1900)
+    message(FATAL_ERROR "you need Visual Studio 2015 or later")
+  endif()
+  if(BUILD_SHARED_LIBS)
+    # See http://www.kitware.com/blog/home/post/939 for details.
+    cmake_minimum_required(VERSION 3.4)
+    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
+  endif()
+  # CMake defaults to /W3, but some users like /W4 (or /Wall) and /WX,
+  # so we disable various warnings that aren't particularly helpful.
+  add_compile_options(/wd4100 /wd4201 /wd4456 /wd4457 /wd4702 /wd4815)
+  # Without a byte order mark (BOM), Visual Studio assumes that the source
+  # file is encoded using the current user code page, so we specify UTF-8.
+  add_compile_options(/utf-8)
+elseif(CYGWIN OR MINGW)
+  # See https://stackoverflow.com/questions/38139631 for details.
+  add_compile_options(-std=gnu++11)
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
+  add_compile_options(-std=c++11)
+endif()
+
+if(WIN32)
+  add_definitions(-DUNICODE -D_UNICODE -DSTRICT -DNOMINMAX)
+  add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS)
+elseif(UNIX)
+  add_compile_options(-pthread)
+  list(APPEND EXTRA_TARGET_LINK_LIBRARIES -pthread)
+endif()
+
+if(USEPCRE)
+  add_definitions(-DUSEPCRE)
+  list(APPEND EXTRA_TARGET_LINK_LIBRARIES pcre)
+endif()
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+set(RE2_SOURCES
+    re2/bitstate.cc
+    re2/compile.cc
+    re2/dfa.cc
+    re2/filtered_re2.cc
+    re2/mimics_pcre.cc
+    re2/nfa.cc
+    re2/onepass.cc
+    re2/parse.cc
+    re2/perl_groups.cc
+    re2/prefilter.cc
+    re2/prefilter_tree.cc
+    re2/prog.cc
+    re2/re2.cc
+    re2/regexp.cc
+    re2/set.cc
+    re2/simplify.cc
+    re2/stringpiece.cc
+    re2/tostring.cc
+    re2/unicode_casefold.cc
+    re2/unicode_groups.cc
+    util/rune.cc
+    util/strutil.cc
+    )
+
+add_library(re2 ${RE2_SOURCES})
+
+if(RE2_BUILD_TESTING)
+  set(TESTING_SOURCES
+      re2/testing/backtrack.cc
+      re2/testing/dump.cc
+      re2/testing/exhaustive_tester.cc
+      re2/testing/null_walker.cc
+      re2/testing/regexp_generator.cc
+      re2/testing/string_generator.cc
+      re2/testing/tester.cc
+      util/pcre.cc
+      )
+
+  add_library(testing STATIC ${TESTING_SOURCES})
+
+  set(TEST_TARGETS
+      charclass_test
+      compile_test
+      filtered_re2_test
+      mimics_pcre_test
+      parse_test
+      possible_match_test
+      re2_test
+      re2_arg_test
+      regexp_test
+      required_prefix_test
+      search_test
+      set_test
+      simplify_test
+      string_generator_test
+
+      dfa_test
+      exhaustive1_test
+      exhaustive2_test
+      exhaustive3_test
+      exhaustive_test
+      random_test
+      )
+
+  set(BENCHMARK_TARGETS
+      regexp_benchmark
+      )
+
+  foreach(target ${TEST_TARGETS})
+    add_executable(${target} re2/testing/${target}.cc util/test.cc)
+    target_link_libraries(${target} testing re2 ${EXTRA_TARGET_LINK_LIBRARIES})
+    add_test(NAME ${target} COMMAND ${target})
+  endforeach(target)
+
+  foreach(target ${BENCHMARK_TARGETS})
+    add_executable(${target} re2/testing/${target}.cc util/benchmark.cc)
+    target_link_libraries(${target} testing re2 ${EXTRA_TARGET_LINK_LIBRARIES})
+  endforeach(target)
+endif()
+
+set(RE2_HEADERS
+    re2/filtered_re2.h
+    re2/re2.h
+    re2/set.h
+    re2/stringpiece.h
+    )
+
+install(FILES ${RE2_HEADERS} DESTINATION include/re2)
+install(TARGETS re2 ARCHIVE DESTINATION lib LIBRARY DESTINATION lib RUNTIME DESTINATION bin)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..3af2b0a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,2 @@
+RE2 uses Gerrit instead of GitHub pull requests.
+See the [Contributing](https://github.com/google/re2/wiki/Contribute) wiki page.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 7b44e04..1a1c848 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -27,9 +27,15 @@
 # Please keep the list sorted.
 
 Dominic Battré <battre@chromium.org>
+Doug Kwan <dougkwan@google.com>
+Dmitriy Vyukov <dvyukov@google.com>
 John Millikin <jmillikin@gmail.com>
+Mike Nazarewicz <mpn@google.com>
+Nico Weber <thakis@chromium.org>
+Pawel Hajdan <phajdan.jr@gmail.com>
 Rob Pike <r@google.com>
 Russ Cox <rsc@swtch.com>
 Sanjay Ghemawat <sanjay@google.com>
 Stefano Rivera <stefano.rivera@gmail.com>
 Srinivasan Venkatachary <vsri@google.com>
+Viatcheslav Ostapenko <sl.ostapenko@samsung.com>
diff --git a/Makefile b/Makefile
index 4ded8ec..f001f06 100644
--- a/Makefile
+++ b/Makefile
@@ -2,33 +2,46 @@
 # Use of this source code is governed by a BSD-style
 # license that can be found in the LICENSE file.
 
-all: obj/libre2.a obj/so/libre2.so
+# To build against ICU for full Unicode properties support,
+# uncomment the next two lines:
+# CCICU=$(shell pkg-config icu-uc --cflags) -DRE2_USE_ICU
+# LDICU=$(shell pkg-config icu-uc --libs)
 
-# to build against PCRE for testing or benchmarking,
-# uncomment the next two lines
+# To build against PCRE for testing or benchmarking,
+# uncomment the next two lines:
 # CCPCRE=-I/usr/local/include -DUSEPCRE
 # LDPCRE=-L/usr/local/lib -lpcre
 
-CXX=g++
-CXXFLAGS=-Wall -O3 -g -pthread  # can override
-RE2_CXXFLAGS=-Wno-sign-compare -c -I. $(CCPCRE)  # required
-LDFLAGS=-pthread
-AR=ar
-ARFLAGS=rsc
-NM=nm
-NMFLAGS=-p
+CXX?=g++
+# can override
+CXXFLAGS?=-O3 -g
+LDFLAGS?=
+# required
+RE2_CXXFLAGS?=-std=c++11 -pthread -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers -I. $(CCICU) $(CCPCRE)
+RE2_LDFLAGS?=-pthread $(LDICU) $(LDPCRE)
+AR?=ar
+ARFLAGS?=rsc
+NM?=nm
+NMFLAGS?=-p
 
 # Variables mandated by GNU, the arbiter of all good taste on the internet.
 # http://www.gnu.org/prep/standards/standards.html
 prefix=/usr/local
 exec_prefix=$(prefix)
-bindir=$(exec_prefix)/bin
 includedir=$(prefix)/include
 libdir=$(exec_prefix)/lib
 INSTALL=install
-INSTALL_PROGRAM=$(INSTALL)
 INSTALL_DATA=$(INSTALL) -m 644
 
+# Work around the weirdness of sed(1) on Darwin. :/
+ifeq ($(shell uname),Darwin)
+SED_INPLACE=sed -i ''
+else ifeq ($(shell uname),SunOS)
+SED_INPLACE=sed -i
+else
+SED_INPLACE=sed -i
+endif
+
 # ABI version
 # http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html
 SONAME=0
@@ -37,34 +50,47 @@
 # access for Unicode data), uncomment the following line:
 # REBUILD_TABLES=1
 
+# The SunOS linker does not support wildcards. :(
 ifeq ($(shell uname),Darwin)
-MAKE_SHARED_LIBRARY=$(CXX) -dynamiclib $(LDFLAGS) -exported_symbols_list libre2.symbols.darwin
+SOEXT=dylib
+SOEXTVER=$(SONAME).$(SOEXT)
+SOEXTVER00=$(SONAME).0.0.$(SOEXT)
+MAKE_SHARED_LIBRARY=$(CXX) -dynamiclib -Wl,-install_name,$(libdir)/libre2.$(SOEXTVER),-exported_symbols_list,libre2.symbols.darwin $(RE2_LDFLAGS) $(LDFLAGS)
+else ifeq ($(shell uname),SunOS)
+SOEXT=so
+SOEXTVER=$(SOEXT).$(SONAME)
+SOEXTVER00=$(SOEXT).$(SONAME).0.0
+MAKE_SHARED_LIBRARY=$(CXX) -shared -Wl,-soname,libre2.$(SOEXTVER) $(RE2_LDFLAGS) $(LDFLAGS)
 else
-MAKE_SHARED_LIBRARY=$(CXX) -shared -Wl,-soname,libre2.so.$(SONAME),--version-script=libre2.symbols $(LDFLAGS)
+SOEXT=so
+SOEXTVER=$(SOEXT).$(SONAME)
+SOEXTVER00=$(SOEXT).$(SONAME).0.0
+MAKE_SHARED_LIBRARY=$(CXX) -shared -Wl,-soname,libre2.$(SOEXTVER),--version-script,libre2.symbols $(RE2_LDFLAGS) $(LDFLAGS)
 endif
 
+all: obj/libre2.a obj/so/libre2.$(SOEXT)
+
 INSTALL_HFILES=\
 	re2/filtered_re2.h\
 	re2/re2.h\
 	re2/set.h\
 	re2/stringpiece.h\
-	re2/variadic_function.h\
 
 HFILES=\
-	util/arena.h\
-	util/atomicops.h\
 	util/benchmark.h\
 	util/flags.h\
 	util/logging.h\
+	util/mix.h\
 	util/mutex.h\
 	util/pcre.h\
-	util/random.h\
+	util/pod_array.h\
 	util/sparse_array.h\
 	util/sparse_set.h\
+	util/strutil.h\
 	util/test.h\
 	util/utf.h\
 	util/util.h\
-	util/valgrind.h\
+	re2/bitmap256.h\
 	re2/filtered_re2.h\
 	re2/prefilter.h\
 	re2/prefilter_tree.h\
@@ -79,17 +105,11 @@
 	re2/testing/tester.h\
 	re2/unicode_casefold.h\
 	re2/unicode_groups.h\
-	re2/variadic_function.h\
 	re2/walker-inl.h\
 
 OFILES=\
-	obj/util/arena.o\
-	obj/util/hash.o\
 	obj/util/rune.o\
-	obj/util/stringpiece.o\
-	obj/util/stringprintf.o\
 	obj/util/strutil.o\
-	obj/util/valgrind.o\
 	obj/re2/bitstate.o\
 	obj/re2/compile.o\
 	obj/re2/dfa.o\
@@ -106,14 +126,13 @@
 	obj/re2/regexp.o\
 	obj/re2/set.o\
 	obj/re2/simplify.o\
+	obj/re2/stringpiece.o\
 	obj/re2/tostring.o\
 	obj/re2/unicode_casefold.o\
 	obj/re2/unicode_groups.o\
 
 TESTOFILES=\
 	obj/util/pcre.o\
-	obj/util/random.o\
-	obj/util/thread.o\
 	obj/re2/testing/backtrack.o\
 	obj/re2/testing/dump.o\
 	obj/re2/testing/exhaustive_tester.o\
@@ -147,7 +166,7 @@
 	obj/test/random_test\
 
 SOFILES=$(patsubst obj/%,obj/so/%,$(OFILES))
-STESTOFILES=$(patsubst obj/%,obj/so/%,$(TESTOFILES))
+# We reuse TESTOFILES for testing the shared lib; only the library itself is built differently.
 STESTS=$(patsubst obj/%,obj/so/%,$(TESTS))
 SBIGTESTS=$(patsubst obj/%,obj/so/%,$(BIGTESTS))
 
@@ -158,15 +177,15 @@
 
 obj/%.o: %.cc $(HFILES)
 	@mkdir -p $$(dirname $@)
-	$(CXX) -o $@ $(CPPFLAGS) $(CXXFLAGS) $(RE2_CXXFLAGS) -DNDEBUG $*.cc
+	$(CXX) -c -o $@ $(CPPFLAGS) $(RE2_CXXFLAGS) $(CXXFLAGS) -DNDEBUG $*.cc
 
 obj/dbg/%.o: %.cc $(HFILES)
 	@mkdir -p $$(dirname $@)
-	$(CXX) -o $@ -fPIC $(CPPFLAGS) $(CXXFLAGS) $(RE2_CXXFLAGS) $*.cc
+	$(CXX) -c -o $@ $(CPPFLAGS) $(RE2_CXXFLAGS) $(CXXFLAGS) $*.cc
 
 obj/so/%.o: %.cc $(HFILES)
 	@mkdir -p $$(dirname $@)
-	$(CXX) -o $@ -fPIC $(CPPFLAGS) $(CXXFLAGS) $(RE2_CXXFLAGS) -DNDEBUG $*.cc
+	$(CXX) -c -o $@ -fPIC $(CPPFLAGS) $(RE2_CXXFLAGS) $(CXXFLAGS) -DNDEBUG $*.cc
 
 obj/libre2.a: $(OFILES)
 	@mkdir -p obj
@@ -176,26 +195,35 @@
 	@mkdir -p obj/dbg
 	$(AR) $(ARFLAGS) obj/dbg/libre2.a $(DOFILES)
 
-obj/so/libre2.so: $(SOFILES)
+obj/so/libre2.$(SOEXT): $(SOFILES)
 	@mkdir -p obj/so
-	$(MAKE_SHARED_LIBRARY) -o $@.$(SONAME) $(SOFILES)
-	ln -sf libre2.so.$(SONAME) $@
-
-obj/test/%: obj/libre2.a obj/re2/testing/%.o $(TESTOFILES) obj/util/test.o
-	@mkdir -p obj/test
-	$(CXX) -o $@ obj/re2/testing/$*.o $(TESTOFILES) obj/util/test.o obj/libre2.a $(LDFLAGS) $(LDPCRE)
+	$(MAKE_SHARED_LIBRARY) -o obj/so/libre2.$(SOEXTVER) $(SOFILES)
+	ln -sf libre2.$(SOEXTVER) $@
 
 obj/dbg/test/%: obj/dbg/libre2.a obj/dbg/re2/testing/%.o $(DTESTOFILES) obj/dbg/util/test.o
 	@mkdir -p obj/dbg/test
-	$(CXX) -o $@ obj/dbg/re2/testing/$*.o $(DTESTOFILES) obj/dbg/util/test.o obj/dbg/libre2.a $(LDFLAGS) $(LDPCRE)
+	$(CXX) -o $@ obj/dbg/re2/testing/$*.o $(DTESTOFILES) obj/dbg/util/test.o obj/dbg/libre2.a $(RE2_LDFLAGS) $(LDFLAGS)
 
-obj/so/test/%: obj/so/libre2.so obj/libre2.a obj/so/re2/testing/%.o $(STESTOFILES) obj/so/util/test.o
+obj/test/%: obj/libre2.a obj/re2/testing/%.o $(TESTOFILES) obj/util/test.o
+	@mkdir -p obj/test
+	$(CXX) -o $@ obj/re2/testing/$*.o $(TESTOFILES) obj/util/test.o obj/libre2.a $(RE2_LDFLAGS) $(LDFLAGS)
+
+# Test the shared lib, falling back to the static lib for private symbols
+obj/so/test/%: obj/so/libre2.$(SOEXT) obj/libre2.a obj/re2/testing/%.o $(TESTOFILES) obj/util/test.o
 	@mkdir -p obj/so/test
-	$(CXX) -o $@ obj/so/re2/testing/$*.o $(STESTOFILES) obj/so/util/test.o -Lobj/so -lre2 obj/libre2.a $(LDFLAGS) $(LDPCRE)
+	$(CXX) -o $@ obj/re2/testing/$*.o $(TESTOFILES) obj/util/test.o -Lobj/so -lre2 obj/libre2.a $(RE2_LDFLAGS) $(LDFLAGS)
 
 obj/test/regexp_benchmark: obj/libre2.a obj/re2/testing/regexp_benchmark.o $(TESTOFILES) obj/util/benchmark.o
 	@mkdir -p obj/test
-	$(CXX) -o $@ obj/re2/testing/regexp_benchmark.o $(TESTOFILES) obj/util/benchmark.o obj/libre2.a $(LDFLAGS) $(LDPCRE)
+	$(CXX) -o $@ obj/re2/testing/regexp_benchmark.o $(TESTOFILES) obj/util/benchmark.o obj/libre2.a $(RE2_LDFLAGS) $(LDFLAGS)
+
+# re2_fuzzer is a target for fuzzers like libFuzzer and AFL. This fake fuzzing
+# is simply a way to check that the target builds and then to run it against a
+# fixed set of inputs. To perform real fuzzing, refer to the documentation for
+# libFuzzer (llvm.org/docs/LibFuzzer.html) and AFL (lcamtuf.coredump.cx/afl/).
+obj/test/re2_fuzzer: obj/libre2.a obj/re2/fuzzing/re2_fuzzer.o obj/util/fuzz.o
+	@mkdir -p obj/test
+	$(CXX) -o $@ obj/re2/fuzzing/re2_fuzzer.o obj/util/fuzz.o obj/libre2.a $(RE2_LDFLAGS) $(LDFLAGS)
 
 ifdef REBUILD_TABLES
 re2/perl_groups.cc: re2/make_perl_groups.pl
@@ -203,6 +231,8 @@
 
 re2/unicode_%.cc: re2/make_unicode_%.py
 	python $< > $@
+
+.PRECIOUS: re2/perl_groups.cc re2/unicode_casefold.cc re2/unicode_groups.cc
 endif
 
 distclean: clean
@@ -217,22 +247,13 @@
 test: $(DTESTS) $(TESTS) $(STESTS) debug-test static-test shared-test
 
 debug-test: $(DTESTS)
-	@echo
-	@echo Running debug binary tests.
-	@echo
 	@./runtests $(DTESTS)
 
 static-test: $(TESTS)
-	@echo
-	@echo Running static binary tests.
-	@echo
 	@./runtests $(TESTS)
 
 shared-test: $(STESTS)
-	@echo
-	@echo Running dynamic binary tests.
-	@echo
-	@LD_LIBRARY_PATH=obj/so:$(LD_LIBRARY_PATH) ./runtests $(STESTS)
+	@./runtests -shared-library-path obj/so $(STESTS)
 
 debug-bigtest: $(DTESTS) $(DBIGTESTS)
 	@./runtests $(DTESTS) $(DBIGTESTS)
@@ -241,27 +262,59 @@
 	@./runtests $(TESTS) $(BIGTESTS)
 
 shared-bigtest: $(STESTS) $(SBIGTESTS)
-	@LD_LIBRARY_PATH=obj/so:$(LD_LIBRARY_PATH) ./runtests $(STESTS) $(SBIGTESTS)
+	@./runtests -shared-library-path obj/so $(STESTS) $(SBIGTESTS)
 
 benchmark: obj/test/regexp_benchmark
 
-install: obj/libre2.a obj/so/libre2.so
-	mkdir -p $(DESTDIR)$(includedir)/re2 $(DESTDIR)$(libdir)
+fuzz: obj/test/re2_fuzzer
+
+install: obj/libre2.a obj/so/libre2.$(SOEXT)
+	mkdir -p $(DESTDIR)$(includedir)/re2 $(DESTDIR)$(libdir)/pkgconfig
 	$(INSTALL_DATA) $(INSTALL_HFILES) $(DESTDIR)$(includedir)/re2
 	$(INSTALL) obj/libre2.a $(DESTDIR)$(libdir)/libre2.a
-	$(INSTALL) obj/so/libre2.so $(DESTDIR)$(libdir)/libre2.so.$(SONAME).0.0
-	ln -sf libre2.so.$(SONAME).0.0 $(DESTDIR)$(libdir)/libre2.so.$(SONAME)
-	ln -sf libre2.so.$(SONAME).0.0 $(DESTDIR)$(libdir)/libre2.so
+	$(INSTALL) obj/so/libre2.$(SOEXT) $(DESTDIR)$(libdir)/libre2.$(SOEXTVER00)
+	ln -sf libre2.$(SOEXTVER00) $(DESTDIR)$(libdir)/libre2.$(SOEXTVER)
+	ln -sf libre2.$(SOEXTVER00) $(DESTDIR)$(libdir)/libre2.$(SOEXT)
+	$(INSTALL_DATA) re2.pc $(DESTDIR)$(libdir)/pkgconfig/re2.pc
+	$(SED_INPLACE) -e "s#@prefix@#${prefix}#" $(DESTDIR)$(libdir)/pkgconfig/re2.pc
+	$(SED_INPLACE) -e "s#@exec_prefix@#${exec_prefix}#" $(DESTDIR)$(libdir)/pkgconfig/re2.pc
+	$(SED_INPLACE) -e "s#@includedir@#${includedir}#" $(DESTDIR)$(libdir)/pkgconfig/re2.pc
+	$(SED_INPLACE) -e "s#@libdir@#${libdir}#" $(DESTDIR)$(libdir)/pkgconfig/re2.pc
 
-testinstall:
+testinstall: static-testinstall shared-testinstall
+	@echo
+	@echo Install tests passed.
+	@echo
+
+static-testinstall: CXXFLAGS:=-std=c++11 -pthread -I$(DESTDIR)$(includedir) $(CXXFLAGS)
+static-testinstall: LDFLAGS:=-pthread -L$(DESTDIR)$(libdir) -l:libre2.a $(LDICU) $(LDFLAGS)
+static-testinstall:
 	@mkdir -p obj
-	cp testinstall.cc obj
-	(cd obj && $(CXX) -I$(DESTDIR)$(includedir) -L$(DESTDIR)$(libdir) testinstall.cc -lre2 -pthread -o testinstall)
-	LD_LIBRARY_PATH=$(DESTDIR)$(libdir) obj/testinstall
+	@cp testinstall.cc obj
+ifeq ($(shell uname),Darwin)
+	@echo Skipping test for libre2.a on Darwin.
+else ifeq ($(shell uname),SunOS)
+	@echo Skipping test for libre2.a on SunOS.
+else
+	(cd obj && $(CXX) testinstall.cc -o testinstall $(CXXFLAGS) $(LDFLAGS))
+	obj/testinstall
+endif
+
+shared-testinstall: CXXFLAGS:=-std=c++11 -pthread -I$(DESTDIR)$(includedir) $(CXXFLAGS)
+shared-testinstall: LDFLAGS:=-pthread -L$(DESTDIR)$(libdir) -lre2 $(LDICU) $(LDFLAGS)
+shared-testinstall:
+	@mkdir -p obj
+	@cp testinstall.cc obj
+	(cd obj && $(CXX) testinstall.cc -o testinstall $(CXXFLAGS) $(LDFLAGS))
+ifeq ($(shell uname),Darwin)
+	DYLD_LIBRARY_PATH="$(DESTDIR)$(libdir):$(DYLD_LIBRARY_PATH)" obj/testinstall
+else
+	LD_LIBRARY_PATH="$(DESTDIR)$(libdir):$(LD_LIBRARY_PATH)" obj/testinstall
+endif
 
 benchlog: obj/test/regexp_benchmark
 	(echo '==BENCHMARK==' `hostname` `date`; \
-	  (uname -a; $(CXX) --version; hg identify; file obj/test/regexp_benchmark) | sed 's/^/# /'; \
+	  (uname -a; $(CXX) --version; git rev-parse --short HEAD; file obj/test/regexp_benchmark) | sed 's/^/# /'; \
 	  echo; \
 	  ./obj/test/regexp_benchmark 'PCRE|RE2') | tee -a benchlog.$$(hostname | sed 's/\..*//')
 
@@ -273,8 +326,9 @@
 	obj/test/% obj/so/test/% obj/dbg/test/%
 
 log:
-	make clean
-	make CXXFLAGS="$(CXXFLAGS) -DLOGGING=1" obj/test/exhaustive{,1,2,3}_test
+	$(MAKE) clean
+	$(MAKE) CXXFLAGS="$(CXXFLAGS) -DLOGGING=1" \
+		$(filter obj/test/exhaustive%_test,$(BIGTESTS))
 	echo '#' RE2 exhaustive tests built by make log >re2-exhaustive.txt
 	echo '#' $$(date) >>re2-exhaustive.txt
 	obj/test/exhaustive_test |grep -v '^PASS$$' >>re2-exhaustive.txt
@@ -282,7 +336,10 @@
 	obj/test/exhaustive2_test |grep -v '^PASS$$' >>re2-exhaustive.txt
 	obj/test/exhaustive3_test |grep -v '^PASS$$' >>re2-exhaustive.txt
 
-	make CXXFLAGS="$(CXXFLAGS) -DLOGGING=1" obj/test/search_test
+	$(MAKE) CXXFLAGS="$(CXXFLAGS) -DLOGGING=1" obj/test/search_test
 	echo '#' RE2 basic search tests built by make $@ >re2-search.txt
 	echo '#' $$(date) >>re2-search.txt
 	obj/test/search_test |grep -v '^PASS$$' >>re2-search.txt
+
+x: x.cc obj/libre2.a
+	g++ -I. -o x x.cc obj/libre2.a
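
The re2_fuzzer comment in the Makefile hunk above describes the target as a build/smoke check only; the actual fuzzing code lives in re2/fuzzing/re2_fuzzer.cc, which is not shown in this diff. For orientation, a libFuzzer-style target for RE2 conventionally centres on an LLVMFuzzerTestOneInput entry point, roughly like this hypothetical sketch:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include "re2/re2.h"

    // Hypothetical sketch: treat the fuzzer input as a pattern, compile it
    // quietly, and exercise a match so both the parser and matcher run.
    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
      std::string pattern(reinterpret_cast<const char*>(data), size);
      RE2 re(pattern, RE2::Quiet);  // RE2::Quiet suppresses error logging
      if (re.ok())
        RE2::PartialMatch("some fixed text to search", re);
      return 0;
    }

Real fuzzing, as the comment notes, is driven by libFuzzer or AFL rather than by this Makefile target.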
diff --git a/README b/README
index 57b3181..d1ef431 100644
--- a/README
+++ b/README
@@ -1,7 +1,7 @@
 This is the source code repository for RE2, a regular expression library.
 
 For documentation about how to install and use RE2,
-visit http://code.google.com/p/re2/.
+visit https://github.com/google/re2/.
 
 The short version is:
 
@@ -10,10 +10,29 @@
 make install
 make testinstall
 
+There is a fair amount of documentation (including code snippets) in
+the re2.h header file.
+
+More information can be found on the wiki:
+https://github.com/google/re2/wiki
+
+Issue tracker:
+https://github.com/google/re2/issues
+
+Mailing list:
+https://groups.google.com/group/re2-dev
+
 Unless otherwise noted, the RE2 source files are distributed
 under the BSD-style license found in the LICENSE file.
 
 RE2's native language is C++.
-An Inferno wrapper is at http://code.google.com/p/inferno-re2/.
-A Python wrapper is at http://github.com/facebook/pyre2/.
-A Ruby wrapper is at http://github.com/axic/rre2/.
+
+A C wrapper is at https://github.com/marcomaggi/cre2/.
+An Erlang wrapper is at https://github.com/dukesoferl/re2/ and on Hex (hex.pm).
+An Inferno wrapper is at https://github.com/powerman/inferno-re2/.
+A Node.js wrapper is at https://github.com/uhop/node-re2/ and on NPM (npmjs.com).
+An OCaml wrapper is at https://github.com/janestreet/re2/ and on OPAM (opam.ocaml.org).
+A Perl wrapper is at https://github.com/dgl/re-engine-RE2/ and on CPAN (cpan.org).
+A Python wrapper is at https://github.com/facebook/pyre2/ and on PyPI (pypi.org).
+An R wrapper is at https://github.com/qinwf/re2r/ and on CRAN (cran.r-project.org).
+A Ruby wrapper is at https://github.com/mudge/re2/ and on RubyGems (rubygems.org).
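
The README now points readers at the code snippets in re2.h for usage documentation. As a minimal sketch of that usage, relying only on the long-standing RE2::FullMatch/RE2::PartialMatch API (nothing introduced by this change):

    #include <iostream>
    #include <string>
    #include "re2/re2.h"

    int main() {
      std::string word;
      int number = 0;
      // Anchored match of the whole input; captures convert to the argument types.
      if (RE2::FullMatch("ruby:1234", "(\\w+):(\\d+)", &word, &number))
        std::cout << word << " " << number << "\n";   // prints "ruby 1234"
      // Unanchored match: the pattern only has to occur somewhere in the text.
      if (RE2::PartialMatch("hello, world", "wor.d"))
        std::cout << "matched\n";
      return 0;
    }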
diff --git a/README.android b/README.android
index 4911c3c..9d6b9ec 100644
--- a/README.android
+++ b/README.android
@@ -1,28 +1,10 @@
 Code obtained from
 ------------------
 
-https://re2.googlecode.com/files/re2-20130115.tgz
+https://github.com/google/re2
 
 Version
 -------
 
-re2-20130115.tgz
-
-Changes required to build using stlport on Android as follows (full diff)
--------------------------------------------------------------------------
-util/util.h:
-
-44,53c44
-< #if defined(ANDROID)
-< 
-< #if defined(_STLPORT_VERSION)
-< #include <unordered_set>      // using stlport
-< #else
-< #include <tr1/unordered_set>  // using gnustl
-< #endif
-< using std::tr1::unordered_set;
-<  
-< #elif defined(__GNUC__) && !defined(USE_CXX0X)
----
-> #if defined(__GNUC__) && !defined(USE_CXX0X)
-
+Commit 79ef3b2d31f06493f687ef9e947d9632bad54b9b
+dated 2019-02-13
diff --git a/README.version b/README.version
index b48b8b7..6332133 100644
--- a/README.version
+++ b/README.version
@@ -1,3 +1,2 @@
-URL: https://re2.googlecode.com/files/re2-20130115.tgz
-Version: 20130115
-BugComponent: 14890
+URL: https://github.com/google/re2
+Version: 79ef3b2d31f06493f687ef9e947d9632bad54b9b
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 0000000..de481fe
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,6 @@
+# Copyright 2009 The RE2 Authors.  All Rights Reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Bazel (http://bazel.io/) WORKSPACE file for RE2.
+workspace(name = "com_googlesource_code_re2")
diff --git a/benchlog/benchplot.py b/benchlog/benchplot.py
new file mode 100755
index 0000000..104abe8
--- /dev/null
+++ b/benchlog/benchplot.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+import argparse     # for ArgumentParser
+import subprocess   # for Popen
+import tempfile     # for NamedTemporaryFile
+import os           # for remove
+
+class gnuplot(object):
+
+    output = "result.png"
+
+    script = """
+             set terminal png size 1024, 768
+             set output "{}.png"
+             set title "re2 benchlog"
+             set datafile separator ";"
+             set grid x y
+             set ylabel "MB/s"
+             set autoscale
+             plot """
+
+    template = """'{}' using 1:5:xticlabels(2) with linespoints linewidth 3 title "{}",\\\n"""
+
+    benchdata = dict()
+    tempfiles = []
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        """
+        remove all temporary files
+        """
+
+        for filename in self.tempfiles:
+            os.remove(filename)
+
+    def parse_re2_benchlog(self, filename):
+        """
+        parse the input benchlog and collect the bench data into self.benchdata
+        """
+
+        benchdata = self.benchdata
+
+        with open(filename) as f:
+
+            for raw in f.readlines():
+
+                data = raw.split('\t')
+
+                if len(data) == 4:
+
+                    data = data[0].split('/') + data[1:]
+                    data = list(map(str.strip, data))
+
+                    if not benchdata.get(data[0]):
+                        benchdata[data[0]] = [ data[1:] ]
+                    else:
+                        benchdata[data[0]].append(data[1:])
+
+    def gen_csv(self):
+        """
+        generate temporary csv files
+        """
+
+        for name, data in self.benchdata.items():
+
+            with tempfile.NamedTemporaryFile(delete=False) as f:
+
+                for index, line in enumerate(data):
+                    f.write('{};{}\n'.format(index, ';'.join(line)).encode())
+
+                self.tempfiles.append(f.name)
+                self.script = self.script + self.template.format(f.name, name)
+
+    def run(self):
+        self.gen_csv()
+        script = self.script[:-3].format(self.output)
+        command = subprocess.Popen(['gnuplot'], stdin=subprocess.PIPE)
+        command.communicate(script.encode())
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser(description='generate plots for benchlog')
+    parser.add_argument('benchlog', type=str, help='benchlog generated by re2')
+    args = parser.parse_args()
+
+    try:
+        subprocess.Popen(['gnuplot'], stdin=subprocess.PIPE)
+    except FileNotFoundError:
+        print('you can install "gnuplot" to generate plots automatically')
+        exit(1)
+
+    with gnuplot() as plot:
+        plot.output = args.benchlog
+        plot.parse_re2_benchlog(args.benchlog)
+        plot.run()
diff --git a/doc/mksyntaxgo b/doc/mksyntaxgo
index 42e87d6..caad9b6 100755
--- a/doc/mksyntaxgo
+++ b/doc/mksyntaxgo
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 set -e
-out=$GOROOT/src/pkg/regexp/syntax/doc.go
+out=$GOROOT/src/regexp/syntax/doc.go
 cp syntax.txt $out
 sam -d $out <<'!'
 ,x g/NOT SUPPORTED/d
diff --git a/doc/syntax.html b/doc/syntax.html
index 7f5e15a..aa08b11 100644
--- a/doc/syntax.html
+++ b/doc/syntax.html
@@ -11,16 +11,15 @@
 <tr><td colspan=2>This page lists the regular expression syntax accepted by RE2.</td></tr>
 <tr><td colspan=2>It also lists syntax accepted by PCRE, PERL, and VIM.</td></tr>
 <tr><td colspan=2>Grayed out expressions are not supported by RE2.</td></tr>
-<tr><td colspan=2>See <a href="http://go/re2">http://go/re2</a> and <a href="http://go/re2quick">http://go/re2quick</a>.</td></tr>
 <tr><td></td></tr>
 <tr><td colspan=2><b>Single characters:</b></td></tr>
-<tr><td><code>.</code></td><td>any character, including newline (s=true)</td></tr>
+<tr><td><code>.</code></td><td>any character, possibly including newline (s=true)</td></tr>
 <tr><td><code>[xyz]</code></td><td>character class</td></tr>
 <tr><td><code>[^xyz]</code></td><td>negated character class</td></tr>
 <tr><td><code>\d</code></td><td>Perl character class</td></tr>
 <tr><td><code>\D</code></td><td>negated Perl character class</td></tr>
-<tr><td><code>[:alpha:]</code></td><td>ASCII character class</td></tr>
-<tr><td><code>[:^alpha:]</code></td><td>negated ASCII character class</td></tr>
+<tr><td><code>[[:alpha:]]</code></td><td>ASCII character class</td></tr>
+<tr><td><code>[[:^alpha:]]</code></td><td>negated ASCII character class</td></tr>
 <tr><td><code>\pN</code></td><td>Unicode character class (one-letter name)</td></tr>
 <tr><td><code>\p{Greek}</code></td><td>Unicode character class</td></tr>
 <tr><td><code>\PN</code></td><td>negated Unicode character class (one-letter name)</td></tr>
@@ -62,7 +61,7 @@
 <tr><td><code><font color=#808080>(?&lt;name&gt;re)</font></code></td><td>named &amp; numbered capturing group </td></tr>
 <tr><td><code><font color=#808080>(?'name're)</font></code></td><td>named &amp; numbered capturing group </td></tr>
 <tr><td><code>(?:re)</code></td><td>non-capturing group</td></tr>
-<tr><td><code>(?flags)</code></td><td>set flags until outer paren closes; non-capturing</td></tr>
+<tr><td><code>(?flags)</code></td><td>set flags within current group; non-capturing</td></tr>
 <tr><td><code>(?flags:re)</code></td><td>set flags during re; non-capturing</td></tr>
 <tr><td><code><font color=#808080>(?#text)</font></code></td><td>comment </td></tr>
 <tr><td><code><font color=#808080>(?|x|y|z)</font></code></td><td>branch numbering reset </td></tr>
@@ -72,16 +71,16 @@
 <tr><td></td></tr>
 <tr><td colspan=2><b>Flags:</b></td></tr>
 <tr><td><code>i</code></td><td>case-insensitive (default false)</td></tr>
-<tr><td><code>m</code></td><td>multi-line mode (default false)</td></tr>
+<tr><td><code>m</code></td><td>multi-line mode: <code>^</code> and <code>$</code> match begin/end line in addition to begin/end text (default false)</td></tr>
 <tr><td><code>s</code></td><td>let <code>.</code> match <code>\n</code> (default false)</td></tr>
 <tr><td><code>U</code></td><td>ungreedy: swap meaning of <code>x*</code> and <code>x*?</code>, <code>x+</code> and <code>x+?</code>, etc (default false)</td></tr>
 <tr><td colspan=2>Flag syntax is <code>xyz</code> (set) or <code>-xyz</code> (clear) or <code>xy-z</code> (set <code>xy</code>, clear <code>z</code>).</td></tr>
 <tr><td></td></tr>
 <tr><td colspan=2><b>Empty strings:</b></td></tr>
 <tr><td><code>^</code></td><td>at beginning of text or line (<code>m</code>=true)</td></tr>
-<tr><td><code>$</code></td><td>at end of text or line (<code>m</code>=true)</td></tr>
+<tr><td><code>$</code></td><td>at end of text (like <code>\z</code> not <code>\Z</code>) or line (<code>m</code>=true)</td></tr>
 <tr><td><code>\A</code></td><td>at beginning of text</td></tr>
-<tr><td><code>\b</code></td><td>at word boundary (<code>\w</code> to left and <code>\W</code> to right or vice versa)</td></tr>
+<tr><td><code>\b</code></td><td>at word boundary (<code>\w</code> on one side and <code>\W</code>, <code>\A</code>, or <code>\z</code> on the other)</td></tr>
 <tr><td><code>\B</code></td><td>not a word boundary</td></tr>
 <tr><td><code><font color=#808080>\G</font></code></td><td>at beginning of subtext being searched  <font size=-2>PCRE</font></td></tr>
 <tr><td><code><font color=#808080>\G</font></code></td><td>at end of last match  <font size=-2>PERL</font></td></tr>
@@ -181,20 +180,20 @@
 <tr><td><code><font color=#808080>\V</font></code></td><td>not vertical space </td></tr>
 <tr><td></td></tr>
 <tr><td colspan=2><b>ASCII character classes:</b></td></tr>
-<tr><td><code>[:alnum:]</code></td><td>alphanumeric (≡ <code>[0-9A-Za-z]</code>)</td></tr>
-<tr><td><code>[:alpha:]</code></td><td>alphabetic (≡ <code>[A-Za-z]</code>)</td></tr>
-<tr><td><code>[:ascii:]</code></td><td>ASCII (≡ <code>[\x00-\x7F]</code>)</td></tr>
-<tr><td><code>[:blank:]</code></td><td>blank (≡ <code>[\t ]</code>)</td></tr>
-<tr><td><code>[:cntrl:]</code></td><td>control (≡ <code>[\x00-\x1F\x7F]</code>)</td></tr>
-<tr><td><code>[:digit:]</code></td><td>digits (≡ <code>[0-9]</code>)</td></tr>
-<tr><td><code>[:graph:]</code></td><td>graphical (≡ <code>[!-~] == [A-Za-z0-9!"#$%&amp;'()*+,\-./:;&lt;=&gt;?@[\\\]^_`{|}~]</code>)</td></tr>
-<tr><td><code>[:lower:]</code></td><td>lower case (≡ <code>[a-z]</code>)</td></tr>
-<tr><td><code>[:print:]</code></td><td>printable (≡ <code>[ -~] == [ [:graph:]]</code>)</td></tr>
-<tr><td><code>[:punct:]</code></td><td>punctuation (≡ <code>[!-/:-@[-`{-~]</code>)</td></tr>
-<tr><td><code>[:space:]</code></td><td>whitespace (≡ <code>[\t\n\v\f\r ]</code>)</td></tr>
-<tr><td><code>[:upper:]</code></td><td>upper case (≡ <code>[A-Z]</code>)</td></tr>
-<tr><td><code>[:word:]</code></td><td>word characters (≡ <code>[0-9A-Za-z_]</code>)</td></tr>
-<tr><td><code>[:xdigit:]</code></td><td>hex digit (≡ <code>[0-9A-Fa-f]</code>)</td></tr>
+<tr><td><code>[[:alnum:]]</code></td><td>alphanumeric (≡ <code>[0-9A-Za-z]</code>)</td></tr>
+<tr><td><code>[[:alpha:]]</code></td><td>alphabetic (≡ <code>[A-Za-z]</code>)</td></tr>
+<tr><td><code>[[:ascii:]]</code></td><td>ASCII (≡ <code>[\x00-\x7F]</code>)</td></tr>
+<tr><td><code>[[:blank:]]</code></td><td>blank (≡ <code>[\t ]</code>)</td></tr>
+<tr><td><code>[[:cntrl:]]</code></td><td>control (≡ <code>[\x00-\x1F\x7F]</code>)</td></tr>
+<tr><td><code>[[:digit:]]</code></td><td>digits (≡ <code>[0-9]</code>)</td></tr>
+<tr><td><code>[[:graph:]]</code></td><td>graphical (≡ <code>[!-~] == [A-Za-z0-9!"#$%&amp;'()*+,\-./:;&lt;=&gt;?@[\\\]^_`{|}~]</code>)</td></tr>
+<tr><td><code>[[:lower:]]</code></td><td>lower case (≡ <code>[a-z]</code>)</td></tr>
+<tr><td><code>[[:print:]]</code></td><td>printable (≡ <code>[ -~] == [ [:graph:]]</code>)</td></tr>
+<tr><td><code>[[:punct:]]</code></td><td>punctuation (≡ <code>[!-/:-@[-`{-~]</code>)</td></tr>
+<tr><td><code>[[:space:]]</code></td><td>whitespace (≡ <code>[\t\n\v\f\r ]</code>)</td></tr>
+<tr><td><code>[[:upper:]]</code></td><td>upper case (≡ <code>[A-Z]</code>)</td></tr>
+<tr><td><code>[[:word:]]</code></td><td>word characters (≡ <code>[0-9A-Za-z_]</code>)</td></tr>
+<tr><td><code>[[:xdigit:]]</code></td><td>hex digit (≡ <code>[0-9A-Fa-f]</code>)</td></tr>
 <tr><td></td></tr>
 <tr><td colspan=2><b>Unicode character class names--general category:</b></td></tr>
 <tr><td><code>C</code></td><td>other</td></tr>
@@ -241,13 +240,17 @@
 <tr><td><code>Arabic</code></td><td>Arabic</td></tr>
 <tr><td><code>Armenian</code></td><td>Armenian</td></tr>
 <tr><td><code>Balinese</code></td><td>Balinese</td></tr>
+<tr><td><code>Bamum</code></td><td>Bamum</td></tr>
+<tr><td><code>Batak</code></td><td>Batak</td></tr>
 <tr><td><code>Bengali</code></td><td>Bengali</td></tr>
 <tr><td><code>Bopomofo</code></td><td>Bopomofo</td></tr>
+<tr><td><code>Brahmi</code></td><td>Brahmi</td></tr>
 <tr><td><code>Braille</code></td><td>Braille</td></tr>
 <tr><td><code>Buginese</code></td><td>Buginese</td></tr>
 <tr><td><code>Buhid</code></td><td>Buhid</td></tr>
 <tr><td><code>Canadian_Aboriginal</code></td><td>Canadian Aboriginal</td></tr>
 <tr><td><code>Carian</code></td><td>Carian</td></tr>
+<tr><td><code>Chakma</code></td><td>Chakma</td></tr>
 <tr><td><code>Cham</code></td><td>Cham</td></tr>
 <tr><td><code>Cherokee</code></td><td>Cherokee</td></tr>
 <tr><td><code>Common</code></td><td>characters not specific to one script</td></tr>
@@ -257,6 +260,7 @@
 <tr><td><code>Cyrillic</code></td><td>Cyrillic</td></tr>
 <tr><td><code>Deseret</code></td><td>Deseret</td></tr>
 <tr><td><code>Devanagari</code></td><td>Devanagari</td></tr>
+<tr><td><code>Egyptian_Hieroglyphs</code></td><td>Egyptian Hieroglyphs</td></tr>
 <tr><td><code>Ethiopic</code></td><td>Ethiopic</td></tr>
 <tr><td><code>Georgian</code></td><td>Georgian</td></tr>
 <tr><td><code>Glagolitic</code></td><td>Glagolitic</td></tr>
@@ -269,7 +273,12 @@
 <tr><td><code>Hanunoo</code></td><td>Hanunoo</td></tr>
 <tr><td><code>Hebrew</code></td><td>Hebrew</td></tr>
 <tr><td><code>Hiragana</code></td><td>Hiragana</td></tr>
+<tr><td><code>Imperial_Aramaic</code></td><td>Imperial Aramaic</td></tr>
 <tr><td><code>Inherited</code></td><td>inherit script from previous character</td></tr>
+<tr><td><code>Inscriptional_Pahlavi</code></td><td>Inscriptional Pahlavi</td></tr>
+<tr><td><code>Inscriptional_Parthian</code></td><td>Inscriptional Parthian</td></tr>
+<tr><td><code>Javanese</code></td><td>Javanese</td></tr>
+<tr><td><code>Kaithi</code></td><td>Kaithi</td></tr>
 <tr><td><code>Kannada</code></td><td>Kannada</td></tr>
 <tr><td><code>Katakana</code></td><td>Katakana</td></tr>
 <tr><td><code>Kayah_Li</code></td><td>Kayah Li</td></tr>
@@ -283,6 +292,11 @@
 <tr><td><code>Lycian</code></td><td>Lycian</td></tr>
 <tr><td><code>Lydian</code></td><td>Lydian</td></tr>
 <tr><td><code>Malayalam</code></td><td>Malayalam</td></tr>
+<tr><td><code>Mandaic</code></td><td>Mandaic</td></tr>
+<tr><td><code>Meetei_Mayek</code></td><td>Meetei Mayek</td></tr>
+<tr><td><code>Meroitic_Cursive</code></td><td>Meroitic Cursive</td></tr>
+<tr><td><code>Meroitic_Hieroglyphs</code></td><td>Meroitic Hieroglyphs</td></tr>
+<tr><td><code>Miao</code></td><td>Miao</td></tr>
 <tr><td><code>Mongolian</code></td><td>Mongolian</td></tr>
 <tr><td><code>Myanmar</code></td><td>Myanmar</td></tr>
 <tr><td><code>New_Tai_Lue</code></td><td>New Tai Lue (aka Simplified Tai Lue)</td></tr>
@@ -291,6 +305,8 @@
 <tr><td><code>Ol_Chiki</code></td><td>Ol Chiki</td></tr>
 <tr><td><code>Old_Italic</code></td><td>Old Italic</td></tr>
 <tr><td><code>Old_Persian</code></td><td>Old Persian</td></tr>
+<tr><td><code>Old_South_Arabian</code></td><td>Old South Arabian</td></tr>
+<tr><td><code>Old_Turkic</code></td><td>Old Turkic</td></tr>
 <tr><td><code>Oriya</code></td><td>Oriya</td></tr>
 <tr><td><code>Osmanya</code></td><td>Osmanya</td></tr>
 <tr><td><code>Phags_Pa</code></td><td>'Phags Pa</td></tr>
@@ -298,14 +314,19 @@
 <tr><td><code>Rejang</code></td><td>Rejang</td></tr>
 <tr><td><code>Runic</code></td><td>Runic</td></tr>
 <tr><td><code>Saurashtra</code></td><td>Saurashtra</td></tr>
+<tr><td><code>Sharada</code></td><td>Sharada</td></tr>
 <tr><td><code>Shavian</code></td><td>Shavian</td></tr>
 <tr><td><code>Sinhala</code></td><td>Sinhala</td></tr>
+<tr><td><code>Sora_Sompeng</code></td><td>Sora Sompeng</td></tr>
 <tr><td><code>Sundanese</code></td><td>Sundanese</td></tr>
 <tr><td><code>Syloti_Nagri</code></td><td>Syloti Nagri</td></tr>
 <tr><td><code>Syriac</code></td><td>Syriac</td></tr>
 <tr><td><code>Tagalog</code></td><td>Tagalog</td></tr>
 <tr><td><code>Tagbanwa</code></td><td>Tagbanwa</td></tr>
 <tr><td><code>Tai_Le</code></td><td>Tai Le</td></tr>
+<tr><td><code>Tai_Tham</code></td><td>Tai Tham</td></tr>
+<tr><td><code>Tai_Viet</code></td><td>Tai Viet</td></tr>
+<tr><td><code>Takri</code></td><td>Takri</td></tr>
 <tr><td><code>Tamil</code></td><td>Tamil</td></tr>
 <tr><td><code>Telugu</code></td><td>Telugu</td></tr>
 <tr><td><code>Thaana</code></td><td>Thaana</td></tr>
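
The corrected descriptions of the m flag and the $ anchor in the hunks above translate directly into observable behaviour. A small sketch, assuming only RE2::PartialMatch and the documented (?m) flag syntax:

    #include <iostream>
    #include "re2/re2.h"

    int main() {
      const char* text = "first line\nsecond line";

      // Without the m flag, $ behaves like \z and only matches at the very
      // end of the text, so "first line$" does not match here.
      std::cout << RE2::PartialMatch(text, "first line$") << "\n";    // 0

      // With (?m), $ also matches at the end of each line.
      std::cout << RE2::PartialMatch(text, "(?m)first line$") << "\n"; // 1
      return 0;
    }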
diff --git a/doc/syntax.txt b/doc/syntax.txt
index f940750..c87494e 100644
--- a/doc/syntax.txt
+++ b/doc/syntax.txt
@@ -7,8 +7,8 @@
 [^xyz]	negated character class
 \d	Perl character class
 \D	negated Perl character class
-[:alpha:]	ASCII character class
-[:^alpha:]	negated ASCII character class
+[[:alpha:]]	ASCII character class
+[[:^alpha:]]	negated ASCII character class
 \pN	Unicode character class (one-letter name)
 \p{Greek}	Unicode character class
 \PN	negated Unicode character class (one-letter name)
@@ -36,6 +36,10 @@
 x{-n}	(== x{n}?) NOT SUPPORTED vim
 x=	(== x?) NOT SUPPORTED vim
 
+Implementation restriction: The counting forms «x{n,m}», «x{n,}», and «x{n}»
+reject forms that create a minimum or maximum repetition count above 1000.
+Unlimited repetitions are not subject to this restriction.
+
 Possessive repetitions:
 x*+	zero or more «x», possessive NOT SUPPORTED
 x++	one or more «x», possessive NOT SUPPORTED
@@ -45,10 +49,10 @@
 x{n}+	exactly «n» «x», possessive NOT SUPPORTED
 
 Grouping:
-(re)	numbered capturing group
-(?P<name>re)	named & numbered capturing group
-(?<name>re)	named & numbered capturing group NOT SUPPORTED
-(?'name're)	named & numbered capturing group NOT SUPPORTED
+(re)	numbered capturing group (submatch)
+(?P<name>re)	named & numbered capturing group (submatch)
+(?<name>re)	named & numbered capturing group (submatch) NOT SUPPORTED
+(?'name're)	named & numbered capturing group (submatch) NOT SUPPORTED
 (?:re)	non-capturing group
 (?flags)	set flags within current group; non-capturing
 (?flags:re)	set flags during re; non-capturing
@@ -69,8 +73,8 @@
 ^	at beginning of text or line («m»=true)
 $	at end of text (like «\z» not «\Z») or line («m»=true)
 \A	at beginning of text
-\b	at word boundary («\w» on one side and «\W», «\A», or «\z» on the other)
-\B	not a word boundary
+\b	at ASCII word boundary («\w» on one side and «\W», «\A», or «\z» on the other)
+\B	not at ASCII word boundary
 \G	at beginning of subtext being searched NOT SUPPORTED pcre
 \G	at end of last match NOT SUPPORTED perl
 \Z	at end of text, or before newline at end of text NOT SUPPORTED
@@ -155,7 +159,7 @@
 [\p{Name}]	named Unicode property inside character class (== \p{Name})
 [^\p{Name}]	named Unicode property inside negated character class (== \P{Name})
 
-Perl character classes:
+Perl character classes (all ASCII-only):
 \d	digits (== [0-9])
 \D	not digits (== [^0-9])
 \s	whitespace (== [\t\n\f\r ])
@@ -169,20 +173,20 @@
 \V	not vertical space NOT SUPPORTED
 
 ASCII character classes:
-[:alnum:]	alphanumeric (== [0-9A-Za-z])
-[:alpha:]	alphabetic (== [A-Za-z])
-[:ascii:]	ASCII (== [\x00-\x7F])
-[:blank:]	blank (== [\t ])
-[:cntrl:]	control (== [\x00-\x1F\x7F])
-[:digit:]	digits (== [0-9])
-[:graph:]	graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
-[:lower:]	lower case (== [a-z])
-[:print:]	printable (== [ -~] == [ [:graph:]])
-[:punct:]	punctuation (== [!-/:-@[-`{-~])
-[:space:]	whitespace (== [\t\n\v\f\r ])
-[:upper:]	upper case (== [A-Z])
-[:word:]	word characters (== [0-9A-Za-z_])
-[:xdigit:]	hex digit (== [0-9A-Fa-f])
+[[:alnum:]]	alphanumeric (== [0-9A-Za-z])
+[[:alpha:]]	alphabetic (== [A-Za-z])
+[[:ascii:]]	ASCII (== [\x00-\x7F])
+[[:blank:]]	blank (== [\t ])
+[[:cntrl:]]	control (== [\x00-\x1F\x7F])
+[[:digit:]]	digits (== [0-9])
+[[:graph:]]	graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
+[[:lower:]]	lower case (== [a-z])
+[[:print:]]	printable (== [ -~] == [ [:graph:]])
+[[:punct:]]	punctuation (== [!-/:-@[-`{-~])
+[[:space:]]	whitespace (== [\t\n\v\f\r ])
+[[:upper:]]	upper case (== [A-Z])
+[[:word:]]	word characters (== [0-9A-Za-z_])
+[[:xdigit:]]	hex digit (== [0-9A-Fa-f])
 
 Unicode character class names--general category:
 C	other
@@ -226,83 +230,154 @@
 Zs	space separator
 
 Unicode character class names--scripts:
-Arabic	Arabic
-Armenian	Armenian
-Balinese	Balinese
-Bengali	Bengali
-Bopomofo	Bopomofo
-Braille	Braille
-Buginese	Buginese
-Buhid	Buhid
-Canadian_Aboriginal	Canadian Aboriginal
-Carian	Carian
-Cham	Cham
-Cherokee	Cherokee
-Common	characters not specific to one script
-Coptic	Coptic
-Cuneiform	Cuneiform
-Cypriot	Cypriot
-Cyrillic	Cyrillic
-Deseret	Deseret
-Devanagari	Devanagari
-Ethiopic	Ethiopic
-Georgian	Georgian
-Glagolitic	Glagolitic
-Gothic	Gothic
-Greek	Greek
-Gujarati	Gujarati
-Gurmukhi	Gurmukhi
-Han	Han
-Hangul	Hangul
-Hanunoo	Hanunoo
-Hebrew	Hebrew
-Hiragana	Hiragana
-Inherited	inherit script from previous character
-Kannada	Kannada
-Katakana	Katakana
-Kayah_Li	Kayah Li
-Kharoshthi	Kharoshthi
-Khmer	Khmer
-Lao	Lao
-Latin	Latin
-Lepcha	Lepcha
-Limbu	Limbu
-Linear_B	Linear B
-Lycian	Lycian
-Lydian	Lydian
-Malayalam	Malayalam
-Mongolian	Mongolian
-Myanmar	Myanmar
-New_Tai_Lue	New Tai Lue (aka Simplified Tai Lue)
-Nko	Nko
-Ogham	Ogham
-Ol_Chiki	Ol Chiki
-Old_Italic	Old Italic
-Old_Persian	Old Persian
-Oriya	Oriya
-Osmanya	Osmanya
-Phags_Pa	'Phags Pa
-Phoenician	Phoenician
-Rejang	Rejang
-Runic	Runic
-Saurashtra	Saurashtra
-Shavian	Shavian
-Sinhala	Sinhala
-Sundanese	Sundanese
-Syloti_Nagri	Syloti Nagri
-Syriac	Syriac
-Tagalog	Tagalog
-Tagbanwa	Tagbanwa
-Tai_Le	Tai Le
-Tamil	Tamil
-Telugu	Telugu
-Thaana	Thaana
-Thai	Thai
-Tibetan	Tibetan
-Tifinagh	Tifinagh
-Ugaritic	Ugaritic
-Vai	Vai
-Yi	Yi
+Adlam
+Ahom
+Anatolian_Hieroglyphs
+Arabic
+Armenian
+Avestan
+Balinese
+Bamum
+Bassa_Vah
+Batak
+Bengali
+Bhaiksuki
+Bopomofo
+Brahmi
+Braille
+Buginese
+Buhid
+Canadian_Aboriginal
+Carian
+Caucasian_Albanian
+Chakma
+Cham
+Cherokee
+Common
+Coptic
+Cuneiform
+Cypriot
+Cyrillic
+Deseret
+Devanagari
+Dogra
+Duployan
+Egyptian_Hieroglyphs
+Elbasan
+Ethiopic
+Georgian
+Glagolitic
+Gothic
+Grantha
+Greek
+Gujarati
+Gunjala_Gondi
+Gurmukhi
+Han
+Hangul
+Hanifi_Rohingya
+Hanunoo
+Hatran
+Hebrew
+Hiragana
+Imperial_Aramaic
+Inherited
+Inscriptional_Pahlavi
+Inscriptional_Parthian
+Javanese
+Kaithi
+Kannada
+Katakana
+Kayah_Li
+Kharoshthi
+Khmer
+Khojki
+Khudawadi
+Lao
+Latin
+Lepcha
+Limbu
+Linear_A
+Linear_B
+Lisu
+Lycian
+Lydian
+Mahajani
+Makasar
+Malayalam
+Mandaic
+Manichaean
+Marchen
+Masaram_Gondi
+Medefaidrin
+Meetei_Mayek
+Mende_Kikakui
+Meroitic_Cursive
+Meroitic_Hieroglyphs
+Miao
+Modi
+Mongolian
+Mro
+Multani
+Myanmar
+Nabataean
+New_Tai_Lue
+Newa
+Nko
+Nushu
+Ogham
+Ol_Chiki
+Old_Hungarian
+Old_Italic
+Old_North_Arabian
+Old_Permic
+Old_Persian
+Old_Sogdian
+Old_South_Arabian
+Old_Turkic
+Oriya
+Osage
+Osmanya
+Pahawh_Hmong
+Palmyrene
+Pau_Cin_Hau
+Phags_Pa
+Phoenician
+Psalter_Pahlavi
+Rejang
+Runic
+Samaritan
+Saurashtra
+Sharada
+Shavian
+Siddham
+SignWriting
+Sinhala
+Sogdian
+Sora_Sompeng
+Soyombo
+Sundanese
+Syloti_Nagri
+Syriac
+Tagalog
+Tagbanwa
+Tai_Le
+Tai_Tham
+Tai_Viet
+Takri
+Tamil
+Tangut
+Telugu
+Thaana
+Thai
+Tibetan
+Tifinagh
+Tirhuta
+Ugaritic
+Vai
+Warang_Citi
+Yi
+Zanabazar_Square
 
 Vim character classes:
 \i	identifier character NOT SUPPORTED vim
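
The implementation restriction added to syntax.txt (counted repetitions capped at 1000) is visible at compile time: an over-limit pattern fails to parse and the RE2 object reports an error. A minimal sketch, assuming RE2::Quiet to keep error logging off:

    #include <iostream>
    #include "re2/re2.h"

    int main() {
      // Counted repetitions up to 1000 compile; larger counts are rejected
      // at parse time, so the RE2 object reports !ok() with an error string.
      RE2 within_limit("x{1,1000}", RE2::Quiet);
      RE2 over_limit("x{1,1001}", RE2::Quiet);
      std::cout << within_limit.ok() << "\n";  // 1
      std::cout << over_limit.ok() << "\n";    // 0
      if (!over_limit.ok())
        std::cout << over_limit.error() << "\n";
      return 0;
    }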
diff --git a/kokoro/bazel.sh b/kokoro/bazel.sh
new file mode 100755
index 0000000..6f25982
--- /dev/null
+++ b/kokoro/bazel.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -eux
+
+cd git/re2
+
+bazel clean
+bazel build --compilation_mode=dbg -- //...
+bazel test  --compilation_mode=dbg --test_output=errors -- //... \
+  -//:dfa_test \
+  -//:exhaustive1_test \
+  -//:exhaustive2_test \
+  -//:exhaustive3_test \
+  -//:exhaustive_test \
+  -//:random_test
+
+bazel clean
+bazel build --compilation_mode=opt -- //...
+bazel test  --compilation_mode=opt --test_output=errors -- //... \
+  -//:dfa_test \
+  -//:exhaustive1_test \
+  -//:exhaustive2_test \
+  -//:exhaustive3_test \
+  -//:exhaustive_test \
+  -//:random_test
+
+exit 0
diff --git a/kokoro/cmake.sh b/kokoro/cmake.sh
new file mode 100755
index 0000000..999fbfe
--- /dev/null
+++ b/kokoro/cmake.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -eux
+
+cd git/re2
+
+case "${KOKORO_JOB_NAME}" in
+  */windows-*)
+    CMAKE_G_A_FLAGS=('-G' 'Visual Studio 14 2015' '-A' 'x64')
+    ;;
+  *)
+    CMAKE_G_A_FLAGS=()
+    # Work around a bug in older versions of bash. :/
+    set +u
+    ;;
+esac
+
+cmake -D CMAKE_BUILD_TYPE=Debug "${CMAKE_G_A_FLAGS[@]}" .
+cmake --build . --config Debug --clean-first
+ctest -C Debug --output-on-failure -E 'dfa|exhaustive|random'
+
+cmake -D CMAKE_BUILD_TYPE=Release "${CMAKE_G_A_FLAGS[@]}" .
+cmake --build . --config Release --clean-first
+ctest -C Release --output-on-failure -E 'dfa|exhaustive|random'
+
+exit 0
diff --git a/kokoro/macos-bazel.cfg b/kokoro/macos-bazel.cfg
new file mode 100644
index 0000000..7901981
--- /dev/null
+++ b/kokoro/macos-bazel.cfg
@@ -0,0 +1 @@
+build_file: "re2/kokoro/macos-bazel.sh"
diff --git a/kokoro/macos-bazel.sh b/kokoro/macos-bazel.sh
new file mode 100755
index 0000000..e43c852
--- /dev/null
+++ b/kokoro/macos-bazel.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+set -eux
+bash git/re2/kokoro/bazel.sh
+exit $?
diff --git a/kokoro/macos-cmake.cfg b/kokoro/macos-cmake.cfg
new file mode 100644
index 0000000..5c459e7
--- /dev/null
+++ b/kokoro/macos-cmake.cfg
@@ -0,0 +1 @@
+build_file: "re2/kokoro/macos-cmake.sh"
diff --git a/kokoro/macos-cmake.sh b/kokoro/macos-cmake.sh
new file mode 100755
index 0000000..ef4b7dc
--- /dev/null
+++ b/kokoro/macos-cmake.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+set -eux
+bash git/re2/kokoro/cmake.sh
+exit $?
diff --git a/kokoro/ubuntu-bazel.cfg b/kokoro/ubuntu-bazel.cfg
new file mode 100644
index 0000000..884d14f
--- /dev/null
+++ b/kokoro/ubuntu-bazel.cfg
@@ -0,0 +1 @@
+build_file: "re2/kokoro/ubuntu-bazel.sh"
diff --git a/kokoro/ubuntu-bazel.sh b/kokoro/ubuntu-bazel.sh
new file mode 100755
index 0000000..e43c852
--- /dev/null
+++ b/kokoro/ubuntu-bazel.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+set -eux
+bash git/re2/kokoro/bazel.sh
+exit $?
diff --git a/kokoro/windows-bazel.bat b/kokoro/windows-bazel.bat
new file mode 100755
index 0000000..283f8d2
--- /dev/null
+++ b/kokoro/windows-bazel.bat
@@ -0,0 +1,2 @@
+bash git/re2/kokoro/bazel.sh
+EXIT /B %ERRORLEVEL%
diff --git a/kokoro/windows-bazel.cfg b/kokoro/windows-bazel.cfg
new file mode 100644
index 0000000..18b1ed7
--- /dev/null
+++ b/kokoro/windows-bazel.cfg
@@ -0,0 +1 @@
+build_file: "re2/kokoro/windows-bazel.bat"
diff --git a/kokoro/windows-cmake.bat b/kokoro/windows-cmake.bat
new file mode 100755
index 0000000..77a4db9
--- /dev/null
+++ b/kokoro/windows-cmake.bat
@@ -0,0 +1,2 @@
+bash git/re2/kokoro/cmake.sh
+EXIT /B %ERRORLEVEL%
diff --git a/kokoro/windows-cmake.cfg b/kokoro/windows-cmake.cfg
new file mode 100644
index 0000000..4453eb6
--- /dev/null
+++ b/kokoro/windows-cmake.cfg
@@ -0,0 +1 @@
+build_file: "re2/kokoro/windows-cmake.bat"
diff --git a/lib/codereview/codereview.cfg b/lib/codereview/codereview.cfg
deleted file mode 100644
index 9581920..0000000
--- a/lib/codereview/codereview.cfg
+++ /dev/null
@@ -1 +0,0 @@
-defaultcc: re2-dev@googlegroups.com
diff --git a/lib/codereview/codereview.py b/lib/codereview/codereview.py
deleted file mode 100644
index d26df2a..0000000
--- a/lib/codereview/codereview.py
+++ /dev/null
@@ -1,3562 +0,0 @@
-# coding=utf-8
-# (The line above is necessary so that I can use 世界 in the
-# *comment* below without Python getting all bent out of shape.)
-
-# Copyright 2007-2009 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#	http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''Mercurial interface to codereview.appspot.com.
-
-To configure, set the following options in
-your repository's .hg/hgrc file.
-
-	[extensions]
-	codereview = /path/to/codereview.py
-
-	[codereview]
-	server = codereview.appspot.com
-
-The server should be running Rietveld; see http://code.google.com/p/rietveld/.
-
-In addition to the new commands, this extension introduces
-the file pattern syntax @nnnnnn, where nnnnnn is a change list
-number, to mean the files included in that change list, which
-must be associated with the current client.
-
-For example, if change 123456 contains the files x.go and y.go,
-"hg diff @123456" is equivalent to"hg diff x.go y.go".
-'''
-
-import sys
-
-if __name__ == "__main__":
-	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
-	sys.exit(2)
-
-# We require Python 2.6 for the json package.
-if sys.version < '2.6':
-	print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
-	print >>sys.stderr, "You are running Python " + sys.version
-	sys.exit(2)
-
-import json
-import os
-import re
-import stat
-import subprocess
-import threading
-import time
-
-from mercurial import commands as hg_commands
-from mercurial import util as hg_util
-
-defaultcc = None
-codereview_disabled = None
-real_rollback = None
-releaseBranch = None
-server = "codereview.appspot.com"
-server_url_base = None
-
-#######################################################################
-# Normally I would split this into multiple files, but it simplifies
-# import path headaches to keep it all in one file.  Sorry.
-# The different parts of the file are separated by banners like this one.
-
-#######################################################################
-# Helpers
-
-def RelativePath(path, cwd):
-	n = len(cwd)
-	if path.startswith(cwd) and path[n] == '/':
-		return path[n+1:]
-	return path
-
-def Sub(l1, l2):
-	return [l for l in l1 if l not in l2]
-
-def Add(l1, l2):
-	l = l1 + Sub(l2, l1)
-	l.sort()
-	return l
-
-def Intersect(l1, l2):
-	return [l for l in l1 if l in l2]
-
-#######################################################################
-# RE: UNICODE STRING HANDLING
-#
-# Python distinguishes between the str (string of bytes)
-# and unicode (string of code points) types.  Most operations
-# work on either one just fine, but some (like regexp matching)
-# require unicode, and others (like write) require str.
-#
-# As befits the language, Python hides the distinction between
-# unicode and str by converting between them silently, but
-# *only* if all the bytes/code points involved are 7-bit ASCII.
-# This means that if you're not careful, your program works
-# fine on "hello, world" and fails on "hello, 世界".  And of course,
-# the obvious way to be careful - use static types - is unavailable.
-# So the only way is trial and error to find where to put explicit
-# conversions.
-#
-# Because more functions do implicit conversion to str (string of bytes)
-# than do implicit conversion to unicode (string of code points),
-# the convention in this module is to represent all text as str,
-# converting to unicode only when calling a unicode-only function
-# and then converting back to str as soon as possible.
-
-def typecheck(s, t):
-	if type(s) != t:
-		raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
-
-# If we have to pass unicode instead of str, ustr does that conversion clearly.
-def ustr(s):
-	typecheck(s, str)
-	return s.decode("utf-8")
-
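A minimal sketch of the str/unicode convention described above, with hypothetical values (not part of the original file): text stays str throughout, and ustr() is applied only at the boundary of a unicode-only operation.

    s = "hello, \xe4\xb8\x96\xe7\x95\x8c"   # module-wide representation: str (UTF-8 bytes)
    u = ustr(s)                             # convert to unicode only for a unicode-only operation
    s2 = u[:7].encode("utf-8")              # ...and back to str as soon as possible
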
-# Even with those, Mercurial still sometimes turns unicode into str
-# and then tries to use it as ascii.  Change Mercurial's default.
-def set_mercurial_encoding_to_utf8():
-	from mercurial import encoding
-	encoding.encoding = 'utf-8'
-
-set_mercurial_encoding_to_utf8()
-
-# Even with those we still run into problems.
-# I tried to do things by the book but could not convince
-# Mercurial to let me check in a change with UTF-8 in the
-# CL description or author field, no matter how many conversions
-# between str and unicode I inserted and despite changing the
-# default encoding.  I'm tired of this game, so set the default
-# encoding for all of Python to 'utf-8', not 'ascii'.
-def default_to_utf8():
-	import sys
-	stdout, __stdout__ = sys.stdout, sys.__stdout__
-	reload(sys)  # site.py deleted setdefaultencoding; get it back
-	sys.stdout, sys.__stdout__ = stdout, __stdout__
-	sys.setdefaultencoding('utf-8')
-
-default_to_utf8()
-
-#######################################################################
-# Status printer for long-running commands
-
-global_status = None
-
-def set_status(s):
-	# print >>sys.stderr, "\t", time.asctime(), s
-	global global_status
-	global_status = s
-
-class StatusThread(threading.Thread):
-	def __init__(self):
-		threading.Thread.__init__(self)
-	def run(self):
-		# pause a reasonable amount of time before
-		# starting to display status messages, so that
-		# most hg commands won't ever see them.
-		time.sleep(30)
-
-		# now show status every 15 seconds
-		while True:
-			time.sleep(15 - time.time() % 15)
-			s = global_status
-			if s is None:
-				continue
-			if s == "":
-				s = "(unknown status)"
-			print >>sys.stderr, time.asctime(), s
-
-def start_status_thread():
-	t = StatusThread()
-	t.setDaemon(True)  # allowed to exit if t is still running
-	t.start()
-
-#######################################################################
-# Change list parsing.
-#
-# Change lists are stored in .hg/codereview/cl.nnnnnn
-# where nnnnnn is the number assigned by the code review server.
-# Most data about a change list is stored on the code review server
-# too: the description, reviewer, and cc list are all stored there.
-# The only thing in the cl.nnnnnn file is the list of relevant files.
-# Also, the existence of the cl.nnnnnn file marks this repository
-# as the one where the change list lives.
-
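For illustration, a hypothetical .hg/codereview/cl.123456 file, as written by the DiskText method defined below, would look roughly like this (the description and file name are invented):

    Mailed: False
    Description:
    	net: fix cname in LookupHost
    Files:
    	src/pkg/net/lookup.go
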
-emptydiff = """Index: ~rietveld~placeholder~
-===================================================================
-diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
-new file mode 100644
-"""
-
-class CL(object):
-	def __init__(self, name):
-		typecheck(name, str)
-		self.name = name
-		self.desc = ''
-		self.files = []
-		self.reviewer = []
-		self.cc = []
-		self.url = ''
-		self.local = False
-		self.web = False
-		self.copied_from = None	# None means current user
-		self.mailed = False
-		self.private = False
-		self.lgtm = []
-
-	def DiskText(self):
-		cl = self
-		s = ""
-		if cl.copied_from:
-			s += "Author: " + cl.copied_from + "\n\n"
-		if cl.private:
-			s += "Private: " + str(self.private) + "\n"
-		s += "Mailed: " + str(self.mailed) + "\n"
-		s += "Description:\n"
-		s += Indent(cl.desc, "\t")
-		s += "Files:\n"
-		for f in cl.files:
-			s += "\t" + f + "\n"
-		typecheck(s, str)
-		return s
-
-	def EditorText(self):
-		cl = self
-		s = _change_prolog
-		s += "\n"
-		if cl.copied_from:
-			s += "Author: " + cl.copied_from + "\n"
-		if cl.url != '':
-			s += 'URL: ' + cl.url + '	# cannot edit\n\n'
-		if cl.private:
-			s += "Private: True\n"
-		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
-		s += "CC: " + JoinComma(cl.cc) + "\n"
-		s += "\n"
-		s += "Description:\n"
-		if cl.desc == '':
-			s += "\t<enter description here>\n"
-		else:
-			s += Indent(cl.desc, "\t")
-		s += "\n"
-		if cl.local or cl.name == "new":
-			s += "Files:\n"
-			for f in cl.files:
-				s += "\t" + f + "\n"
-			s += "\n"
-		typecheck(s, str)
-		return s
-
-	def PendingText(self, quick=False):
-		cl = self
-		s = cl.name + ":" + "\n"
-		s += Indent(cl.desc, "\t")
-		s += "\n"
-		if cl.copied_from:
-			s += "\tAuthor: " + cl.copied_from + "\n"
-		if not quick:
-			s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
-			for (who, line) in cl.lgtm:
-				s += "\t\t" + who + ": " + line + "\n"
-			s += "\tCC: " + JoinComma(cl.cc) + "\n"
-		s += "\tFiles:\n"
-		for f in cl.files:
-			s += "\t\t" + f + "\n"
-		typecheck(s, str)
-		return s
-
-	def Flush(self, ui, repo):
-		if self.name == "new":
-			self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
-		dir = CodeReviewDir(ui, repo)
-		path = dir + '/cl.' + self.name
-		f = open(path+'!', "w")
-		f.write(self.DiskText())
-		f.close()
-		if sys.platform == "win32" and os.path.isfile(path):
-			os.remove(path)
-		os.rename(path+'!', path)
-		if self.web and not self.copied_from:
-			EditDesc(self.name, desc=self.desc,
-				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
-				private=self.private)
-
-	def Delete(self, ui, repo):
-		dir = CodeReviewDir(ui, repo)
-		os.unlink(dir + "/cl." + self.name)
-
-	def Subject(self):
-		s = line1(self.desc)
-		if len(s) > 60:
-			s = s[0:55] + "..."
-		if self.name != "new":
-			s = "code review %s: %s" % (self.name, s)
-		typecheck(s, str)
-		return s
-
-	def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
-		if not self.files and not creating:
-			ui.warn("no files in change list\n")
-		if ui.configbool("codereview", "force_gofmt", True) and gofmt:
-			CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
-		set_status("uploading CL metadata + diffs")
-		os.chdir(repo.root)
-		form_fields = [
-			("content_upload", "1"),
-			("reviewers", JoinComma(self.reviewer)),
-			("cc", JoinComma(self.cc)),
-			("description", self.desc),
-			("base_hashes", ""),
-		]
-
-		if self.name != "new":
-			form_fields.append(("issue", self.name))
-		vcs = None
-		# We do not include files when creating the issue,
-		# because we want the patch sets to record the repository
-		# and base revision they are diffs against.  We use the patch
-		# set message for that purpose, but there is no message with
-		# the first patch set.  Instead the message gets used as the
-		# new CL's overall subject.  So omit the diffs when creating
-		# and then we'll run an immediate upload.
-		# This has the effect that every CL begins with an empty "Patch set 1".
-		if self.files and not creating:
-			vcs = MercurialVCS(upload_options, ui, repo)
-			data = vcs.GenerateDiff(self.files)
-			files = vcs.GetBaseFiles(data)
-			if len(data) > MAX_UPLOAD_SIZE:
-				uploaded_diff_file = []
-				form_fields.append(("separate_patches", "1"))
-			else:
-				uploaded_diff_file = [("data", "data.diff", data)]
-		else:
-			uploaded_diff_file = [("data", "data.diff", emptydiff)]
-		
-		if vcs and self.name != "new":
-			form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
-		else:
-			# First upload sets the subject for the CL itself.
-			form_fields.append(("subject", self.Subject()))
-		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
-		response_body = MySend("/upload", body, content_type=ctype)
-		patchset = None
-		msg = response_body
-		lines = msg.splitlines()
-		if len(lines) >= 2:
-			msg = lines[0]
-			patchset = lines[1].strip()
-			patches = [x.split(" ", 1) for x in lines[2:]]
-		if response_body.startswith("Issue updated.") and quiet:
-			pass
-		else:
-			ui.status(msg + "\n")
-		set_status("uploaded CL metadata + diffs")
-		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
-			raise hg_util.Abort("failed to update issue: " + response_body)
-		issue = msg[msg.rfind("/")+1:]
-		self.name = issue
-		if not self.url:
-			self.url = server_url_base + self.name
-		if not uploaded_diff_file:
-			set_status("uploading patches")
-			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
-		if vcs:
-			set_status("uploading base files")
-			vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
-		if send_mail:
-			set_status("sending mail")
-			MySend("/" + issue + "/mail", payload="")
-		self.web = True
-		set_status("flushing changes to disk")
-		self.Flush(ui, repo)
-		return
-
-	def Mail(self, ui, repo):
-		pmsg = "Hello " + JoinComma(self.reviewer)
-		if self.cc:
-			pmsg += " (cc: %s)" % (', '.join(self.cc),)
-		pmsg += ",\n"
-		pmsg += "\n"
-		repourl = ui.expandpath("default")
-		if not self.mailed:
-			pmsg += "I'd like you to review this change to\n" + repourl + "\n"
-		else:
-			pmsg += "Please take another look.\n"
-		typecheck(pmsg, str)
-		PostMessage(ui, self.name, pmsg, subject=self.Subject())
-		self.mailed = True
-		self.Flush(ui, repo)
-
-def GoodCLName(name):
-	typecheck(name, str)
-	return re.match("^[0-9]+$", name)
-
-def ParseCL(text, name):
-	typecheck(text, str)
-	typecheck(name, str)
-	sname = None
-	lineno = 0
-	sections = {
-		'Author': '',
-		'Description': '',
-		'Files': '',
-		'URL': '',
-		'Reviewer': '',
-		'CC': '',
-		'Mailed': '',
-		'Private': '',
-	}
-	for line in text.split('\n'):
-		lineno += 1
-		line = line.rstrip()
-		if line != '' and line[0] == '#':
-			continue
-		if line == '' or line[0] == ' ' or line[0] == '\t':
-			if sname == None and line != '':
-				return None, lineno, 'text outside section'
-			if sname != None:
-				sections[sname] += line + '\n'
-			continue
-		p = line.find(':')
-		if p >= 0:
-			s, val = line[:p].strip(), line[p+1:].strip()
-			if s in sections:
-				sname = s
-				if val != '':
-					sections[sname] += val + '\n'
-				continue
-		return None, lineno, 'malformed section header'
-
-	for k in sections:
-		sections[k] = StripCommon(sections[k]).rstrip()
-
-	cl = CL(name)
-	if sections['Author']:
-		cl.copied_from = sections['Author']
-	cl.desc = sections['Description']
-	for line in sections['Files'].split('\n'):
-		i = line.find('#')
-		if i >= 0:
-			line = line[0:i].rstrip()
-		line = line.strip()
-		if line == '':
-			continue
-		cl.files.append(line)
-	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
-	cl.cc = SplitCommaSpace(sections['CC'])
-	cl.url = sections['URL']
-	if sections['Mailed'] != 'False':
-		# Odd default, but avoids spurious mailings when
-		# reading old CLs that do not have a Mailed: line.
-		# CLs created with this update will always have 
-		# Mailed: False on disk.
-		cl.mailed = True
-	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
-		cl.private = True
-	if cl.desc == '<enter description here>':
-		cl.desc = ''
-	return cl, 0, ''
-
-def SplitCommaSpace(s):
-	typecheck(s, str)
-	s = s.strip()
-	if s == "":
-		return []
-	return re.split(", *", s)
-
-def CutDomain(s):
-	typecheck(s, str)
-	i = s.find('@')
-	if i >= 0:
-		s = s[0:i]
-	return s
-
-def JoinComma(l):
-	for s in l:
-		typecheck(s, str)
-	return ", ".join(l)
-
-def ExceptionDetail():
-	s = str(sys.exc_info()[0])
-	if s.startswith("<type '") and s.endswith("'>"):
-		s = s[7:-2]
-	elif s.startswith("<class '") and s.endswith("'>"):
-		s = s[8:-2]
-	arg = str(sys.exc_info()[1])
-	if len(arg) > 0:
-		s += ": " + arg
-	return s
-
-def IsLocalCL(ui, repo, name):
-	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
-
-# Load CL from disk and/or the web.
-def LoadCL(ui, repo, name, web=True):
-	typecheck(name, str)
-	set_status("loading CL " + name)
-	if not GoodCLName(name):
-		return None, "invalid CL name"
-	dir = CodeReviewDir(ui, repo)
-	path = dir + "cl." + name
-	if os.access(path, 0):
-		ff = open(path)
-		text = ff.read()
-		ff.close()
-		cl, lineno, err = ParseCL(text, name)
-		if err != "":
-			return None, "malformed CL data: "+err
-		cl.local = True
-	else:
-		cl = CL(name)
-	if web:
-		set_status("getting issue metadata from web")
-		d = JSONGet(ui, "/api/" + name + "?messages=true")
-		set_status(None)
-		if d is None:
-			return None, "cannot load CL %s from server" % (name,)
-		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
-			return None, "malformed response loading CL data from code review server"
-		cl.dict = d
-		cl.reviewer = d.get('reviewers', [])
-		cl.cc = d.get('cc', [])
-		if cl.local and cl.copied_from and cl.desc:
-			# local copy of CL written by someone else
-			# and we saved a description.  use that one,
-			# so that committers can edit the description
-			# before doing hg submit.
-			pass
-		else:
-			cl.desc = d.get('description', "")
-		cl.url = server_url_base + name
-		cl.web = True
-		cl.private = d.get('private', False) != False
-		cl.lgtm = []
-		for m in d.get('messages', []):
-			if m.get('approval', False) == True:
-				who = re.sub('@.*', '', m.get('sender', ''))
-				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
-				cl.lgtm.append((who, text))
-
-	set_status("loaded CL " + name)
-	return cl, ''
-
-class LoadCLThread(threading.Thread):
-	def __init__(self, ui, repo, dir, f, web):
-		threading.Thread.__init__(self)
-		self.ui = ui
-		self.repo = repo
-		self.dir = dir
-		self.f = f
-		self.web = web
-		self.cl = None
-	def run(self):
-		cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
-		if err != '':
-			self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
-			return
-		self.cl = cl
-
-# Load all the CLs from this repository.
-def LoadAllCL(ui, repo, web=True):
-	dir = CodeReviewDir(ui, repo)
-	m = {}
-	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
-	if not files:
-		return m
-	active = []
-	first = True
-	for f in files:
-		t = LoadCLThread(ui, repo, dir, f, web)
-		t.start()
-		if web and first:
-			# first request: wait in case it needs to authenticate
-			# otherwise we get lots of user/password prompts
-			# running in parallel.
-			t.join()
-			if t.cl:
-				m[t.cl.name] = t.cl
-			first = False
-		else:
-			active.append(t)
-	for t in active:
-		t.join()
-		if t.cl:
-			m[t.cl.name] = t.cl
-	return m
-
-# Find repository root.  On error, ui.warn and return None
-def RepoDir(ui, repo):
-	url = repo.url();
-	if not url.startswith('file:'):
-		ui.warn("repository %s is not in local file system\n" % (url,))
-		return None
-	url = url[5:]
-	if url.endswith('/'):
-		url = url[:-1]
-	typecheck(url, str)
-	return url
-
-# Find (or make) code review directory.  On error, ui.warn and return None
-def CodeReviewDir(ui, repo):
-	dir = RepoDir(ui, repo)
-	if dir == None:
-		return None
-	dir += '/.hg/codereview/'
-	if not os.path.isdir(dir):
-		try:
-			os.mkdir(dir, 0700)
-		except:
-			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
-			return None
-	typecheck(dir, str)
-	return dir
-
-# Turn leading tabs into spaces, so that the common white space
-# prefix doesn't get confused when people's editors write out 
-# some lines with spaces, some with tabs.  Only a heuristic
-# (some editors don't use 8 spaces either) but a useful one.
-def TabsToSpaces(line):
-	i = 0
-	while i < len(line) and line[i] == '\t':
-		i += 1
-	return ' '*(8*i) + line[i:]
-
-# Strip maximal common leading white space prefix from text
-def StripCommon(text):
-	typecheck(text, str)
-	ws = None
-	for line in text.split('\n'):
-		line = line.rstrip()
-		if line == '':
-			continue
-		line = TabsToSpaces(line)
-		white = line[:len(line)-len(line.lstrip())]
-		if ws == None:
-			ws = white
-		else:
-			common = ''
-			for i in range(min(len(white), len(ws))+1):
-				if white[0:i] == ws[0:i]:
-					common = white[0:i]
-			ws = common
-		if ws == '':
-			break
-	if ws == None:
-		return text
-	t = ''
-	for line in text.split('\n'):
-		line = line.rstrip()
-		line = TabsToSpaces(line)
-		if line.startswith(ws):
-			line = line[len(ws):]
-		if line == '' and t == '':
-			continue
-		t += line + '\n'
-	while len(t) >= 2 and t[-2:] == '\n\n':
-		t = t[:-1]
-	typecheck(t, str)
-	return t
-
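Two hypothetical checks (not in the original file) showing what the helpers above do: leading tabs become runs of eight spaces, and the longest common leading whitespace is then stripped.

    assert TabsToSpaces("\t\tx") == " " * 16 + "x"
    assert StripCommon("    foo\n      bar") == "foo\n  bar\n"
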
-# Indent text with indent.
-def Indent(text, indent):
-	typecheck(text, str)
-	typecheck(indent, str)
-	t = ''
-	for line in text.split('\n'):
-		t += indent + line + '\n'
-	typecheck(t, str)
-	return t
-
-# Return the first line of l
-def line1(text):
-	typecheck(text, str)
-	return text.split('\n')[0]
-
-_change_prolog = """# Change list.
-# Lines beginning with # are ignored.
-# Multi-line values should be indented.
-"""
-
-desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
-
-desc_msg = '''Your CL description appears not to use the standard form.
-
-The first line of your change description is conventionally a
-one-line summary of the change, prefixed by the primary affected package,
-and is used as the subject for code review mail; the rest of the description
-elaborates.
-
-Examples:
-
-	encoding/rot13: new package
-
-	math: add IsInf, IsNaN
-	
-	net: fix cname in LookupHost
-
-	unicode: update to Unicode 5.0.2
-
-'''
-
-def promptyesno(ui, msg):
-	return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
-
-def promptremove(ui, repo, f):
-	if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
-		if hg_commands.remove(ui, repo, 'path:'+f) != 0:
-			ui.warn("error removing %s" % (f,))
-
-def promptadd(ui, repo, f):
-	if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
-		if hg_commands.add(ui, repo, 'path:'+f) != 0:
-			ui.warn("error adding %s" % (f,))
-
-def EditCL(ui, repo, cl):
-	set_status(None)	# do not show status
-	s = cl.EditorText()
-	while True:
-		s = ui.edit(s, ui.username())
-		
-		# We can't trust Mercurial + Python not to die before making the change,
-		# so, by popular demand, just scribble the most recent CL edit into
-		# $(hg root)/last-change so that if Mercurial does die, people
-		# can look there for their work.
-		try:
-			f = open(repo.root+"/last-change", "w")
-			f.write(s)
-			f.close()
-		except:
-			pass
-
-		clx, line, err = ParseCL(s, cl.name)
-		if err != '':
-			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
-				return "change list not modified"
-			continue
-		
-		# Check description.
-		if clx.desc == '':
-			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
-				continue
-		elif re.search('<enter reason for undo>', clx.desc):
-			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
-				continue
-		elif not re.match(desc_re, clx.desc.split('\n')[0]):
-			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
-				continue
-
-		# Check file list for files that need to be hg added or hg removed
-		# or simply aren't understood.
-		pats = ['path:'+f for f in clx.files]
-		changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
-		deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
-		unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
-		ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
-		clean = hg_matchPattern(ui, repo, *pats, clean=True)
-		files = []
-		for f in clx.files:
-			if f in changed:
-				files.append(f)
-				continue
-			if f in deleted:
-				promptremove(ui, repo, f)
-				files.append(f)
-				continue
-			if f in unknown:
-				promptadd(ui, repo, f)
-				files.append(f)
-				continue
-			if f in ignored:
-				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
-				continue
-			if f in clean:
-				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
-				files.append(f)
-				continue
-			p = repo.root + '/' + f
-			if os.path.isfile(p):
-				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
-				files.append(f)
-				continue
-			if os.path.isdir(p):
-				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
-				continue
-			ui.warn("error: %s does not exist; omitting\n" % (f,))
-		clx.files = files
-
-		cl.desc = clx.desc
-		cl.reviewer = clx.reviewer
-		cl.cc = clx.cc
-		cl.files = clx.files
-		cl.private = clx.private
-		break
-	return ""
-
-# For use by submit, etc. (NOT by change)
-# Get change list number or list of files from command line.
-# If files are given, make a new change list.
-def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
-	if len(pats) > 0 and GoodCLName(pats[0]):
-		if len(pats) != 1:
-			return None, "cannot specify change number and file names"
-		if opts.get('message'):
-			return None, "cannot use -m with existing CL"
-		cl, err = LoadCL(ui, repo, pats[0], web=True)
-		if err != "":
-			return None, err
-	else:
-		cl = CL("new")
-		cl.local = True
-		cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
-		if not cl.files:
-			return None, "no files changed"
-	if opts.get('reviewer'):
-		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
-	if opts.get('cc'):
-		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
-	if defaultcc:
-		cl.cc = Add(cl.cc, defaultcc)
-	if cl.name == "new":
-		if opts.get('message'):
-			cl.desc = opts.get('message')
-		else:
-			err = EditCL(ui, repo, cl)
-			if err != '':
-				return None, err
-	return cl, ""
-
-#######################################################################
-# Change list file management
-
-# Return list of changed files in repository that match pats.
-# The patterns came from the command line, so we warn
-# if they have no effect or cannot be understood.
-def ChangedFiles(ui, repo, pats, taken=None):
-	taken = taken or {}
-	# Run each pattern separately so that we can warn about
-	# patterns that didn't do anything useful.
-	for p in pats:
-		for f in hg_matchPattern(ui, repo, p, unknown=True):
-			promptadd(ui, repo, f)
-		for f in hg_matchPattern(ui, repo, p, removed=True):
-			promptremove(ui, repo, f)
-		files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
-		for f in files:
-			if f in taken:
-				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
-		if not files:
-			ui.warn("warning: %s did not match any modified files\n" % (p,))
-
-	# Again, all at once (eliminates duplicates)
-	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
-	l.sort()
-	if taken:
-		l = Sub(l, taken.keys())
-	return l
-
-# Return list of changed files in repository that match pats and still exist.
-def ChangedExistingFiles(ui, repo, pats, opts):
-	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
-	l.sort()
-	return l
-
-# Return list of files claimed by existing CLs
-def Taken(ui, repo):
-	all = LoadAllCL(ui, repo, web=False)
-	taken = {}
-	for _, cl in all.items():
-		for f in cl.files:
-			taken[f] = cl
-	return taken
-
-# Return list of changed files that are not claimed by other CLs
-def DefaultFiles(ui, repo, pats):
-	return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
-
-#######################################################################
-# File format checking.
-
-def CheckFormat(ui, repo, files, just_warn=False):
-	set_status("running gofmt")
-	CheckGofmt(ui, repo, files, just_warn)
-	CheckTabfmt(ui, repo, files, just_warn)
-
-# Check that gofmt run on the list of files does not change them
-def CheckGofmt(ui, repo, files, just_warn):
-	files = gofmt_required(files)
-	if not files:
-		return
-	cwd = os.getcwd()
-	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
-	files = [f for f in files if os.access(f, 0)]
-	if not files:
-		return
-	try:
-		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
-		cmd.stdin.close()
-	except:
-		raise hg_util.Abort("gofmt: " + ExceptionDetail())
-	data = cmd.stdout.read()
-	errors = cmd.stderr.read()
-	cmd.wait()
-	set_status("done with gofmt")
-	if len(errors) > 0:
-		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
-		return
-	if len(data) > 0:
-		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
-		if just_warn:
-			ui.warn("warning: " + msg + "\n")
-		else:
-			raise hg_util.Abort(msg)
-	return
-
-# Check that *.[chys] files indent using tabs.
-def CheckTabfmt(ui, repo, files, just_warn):
-	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
-	if not files:
-		return
-	cwd = os.getcwd()
-	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
-	files = [f for f in files if os.access(f, 0)]
-	badfiles = []
-	for f in files:
-		try:
-			for line in open(f, 'r'):
-				# Four leading spaces is enough to complain about,
-				# except that some Plan 9 code uses four spaces as the label indent,
-				# so allow that.
-				if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
-					badfiles.append(f)
-					break
-		except:
-			# ignore cannot open file, etc.
-			pass
-	if len(badfiles) > 0:
-		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
-		if just_warn:
-			ui.warn("warning: " + msg + "\n")
-		else:
-			raise hg_util.Abort(msg)
-	return
-
-#######################################################################
-# CONTRIBUTORS file parsing
-
-contributorsCache = None
-contributorsURL = None
-
-def ReadContributors(ui, repo):
-	global contributorsCache
-	if contributorsCache is not None:
-		return contributorsCache
-
-	try:
-		if contributorsURL is not None:
-			opening = contributorsURL
-			f = urllib2.urlopen(contributorsURL)
-		else:
-			opening = repo.root + '/CONTRIBUTORS'
-			f = open(repo.root + '/CONTRIBUTORS', 'r')
-	except:
-		ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
-		return
-
-	contributors = {}
-	for line in f:
-		# CONTRIBUTORS is a list of lines like:
-		#	Person <email>
-		#	Person <email> <alt-email>
-		# The first email address is the one used in commit logs.
-		if line.startswith('#'):
-			continue
-		m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
-		if m:
-			name = m.group(1)
-			email = m.group(2)[1:-1]
-			contributors[email.lower()] = (name, email)
-			for extra in m.group(3).split():
-				contributors[extra[1:-1].lower()] = (name, email)
-
-	contributorsCache = contributors
-	return contributors
-
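As a hypothetical illustration (invented name and addresses), every address on a CONTRIBUTORS line keys the same (name, primary email) pair, so alternate addresses resolve to the canonical one used in commit logs.

    line = "Grace Hopper <grace@example.com> <ghopper@alt.example.com>"
    # After ReadContributors() processes that line, both keys map to the primary pair:
    #   contributors["grace@example.com"]       == ("Grace Hopper", "grace@example.com")
    #   contributors["ghopper@alt.example.com"] == ("Grace Hopper", "grace@example.com")
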
-def CheckContributor(ui, repo, user=None):
-	set_status("checking CONTRIBUTORS file")
-	user, userline = FindContributor(ui, repo, user, warn=False)
-	if not userline:
-		raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
-	return userline
-
-def FindContributor(ui, repo, user=None, warn=True):
-	if not user:
-		user = ui.config("ui", "username")
-		if not user:
-			raise hg_util.Abort("[ui] username is not configured in .hgrc")
-	user = user.lower()
-	m = re.match(r".*<(.*)>", user)
-	if m:
-		user = m.group(1)
-
-	contributors = ReadContributors(ui, repo)
-	if user not in contributors:
-		if warn:
-			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
-		return user, None
-	
-	user, email = contributors[user]
-	return email, "%s <%s>" % (user, email)
-
-#######################################################################
-# Mercurial helper functions.
-# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
-# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
-# with Mercurial.  It has proved the most stable as they make changes.
-
-hgversion = hg_util.version()
-
-# We require Mercurial 1.9 and suggest Mercurial 2.0.
-# The details of the scmutil package changed then,
-# so allowing earlier versions would require extra band-aids below.
-# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
-hg_required = "1.9"
-hg_suggested = "2.0"
-
-old_message = """
-
-The code review extension requires Mercurial """+hg_required+""" or newer.
-You are using Mercurial """+hgversion+""".
-
-To install a new Mercurial, use
-
-	sudo easy_install mercurial=="""+hg_suggested+"""
-
-or visit http://mercurial.selenic.com/downloads/.
-"""
-
-linux_message = """
-You may need to clear your current Mercurial installation by running:
-
-	sudo apt-get remove mercurial mercurial-common
-	sudo rm -rf /etc/mercurial
-"""
-
-if hgversion < hg_required:
-	msg = old_message
-	if os.access("/etc/mercurial", 0):
-		msg += linux_message
-	raise hg_util.Abort(msg)
-
-from mercurial.hg import clean as hg_clean
-from mercurial import cmdutil as hg_cmdutil
-from mercurial import error as hg_error
-from mercurial import match as hg_match
-from mercurial import node as hg_node
-
-class uiwrap(object):
-	def __init__(self, ui):
-		self.ui = ui
-		ui.pushbuffer()
-		self.oldQuiet = ui.quiet
-		ui.quiet = True
-		self.oldVerbose = ui.verbose
-		ui.verbose = False
-	def output(self):
-		ui = self.ui
-		ui.quiet = self.oldQuiet
-		ui.verbose = self.oldVerbose
-		return ui.popbuffer()
-
-def to_slash(path):
-	if sys.platform == "win32":
-		return path.replace('\\', '/')
-	return path
-
-def hg_matchPattern(ui, repo, *pats, **opts):
-	w = uiwrap(ui)
-	hg_commands.status(ui, repo, *pats, **opts)
-	text = w.output()
-	ret = []
-	prefix = to_slash(os.path.realpath(repo.root))+'/'
-	for line in text.split('\n'):
-		f = line.split()
-		if len(f) > 1:
-			if len(pats) > 0:
-				# Given patterns, Mercurial shows relative to cwd
-				p = to_slash(os.path.realpath(f[1]))
-				if not p.startswith(prefix):
-					print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
-				else:
-					ret.append(p[len(prefix):])
-			else:
-				# Without patterns, Mercurial shows relative to root (what we want)
-				ret.append(to_slash(f[1]))
-	return ret
-
-def hg_heads(ui, repo):
-	w = uiwrap(ui)
-	hg_commands.heads(ui, repo)
-	return w.output()
-
-noise = [
-	"",
-	"resolving manifests",
-	"searching for changes",
-	"couldn't find merge tool hgmerge",
-	"adding changesets",
-	"adding manifests",
-	"adding file changes",
-	"all local heads known remotely",
-]
-
-def isNoise(line):
-	line = str(line)
-	for x in noise:
-		if line == x:
-			return True
-	return False
-
-def hg_incoming(ui, repo):
-	w = uiwrap(ui)
-	ret = hg_commands.incoming(ui, repo, force=False, bundle="")
-	if ret and ret != 1:
-		raise hg_util.Abort(ret)
-	return w.output()
-
-def hg_log(ui, repo, **opts):
-	for k in ['date', 'keyword', 'rev', 'user']:
-		if not opts.has_key(k):
-			opts[k] = ""
-	w = uiwrap(ui)
-	ret = hg_commands.log(ui, repo, **opts)
-	if ret:
-		raise hg_util.Abort(ret)
-	return w.output()
-
-def hg_outgoing(ui, repo, **opts):
-	w = uiwrap(ui)
-	ret = hg_commands.outgoing(ui, repo, **opts)
-	if ret and ret != 1:
-		raise hg_util.Abort(ret)
-	return w.output()
-
-def hg_pull(ui, repo, **opts):
-	w = uiwrap(ui)
-	ui.quiet = False
-	ui.verbose = True  # for file list
-	err = hg_commands.pull(ui, repo, **opts)
-	for line in w.output().split('\n'):
-		if isNoise(line):
-			continue
-		if line.startswith('moving '):
-			line = 'mv ' + line[len('moving '):]
-		if line.startswith('getting ') and line.find(' to ') >= 0:
-			line = 'mv ' + line[len('getting '):]
-		if line.startswith('getting '):
-			line = '+ ' + line[len('getting '):]
-		if line.startswith('removing '):
-			line = '- ' + line[len('removing '):]
-		ui.write(line + '\n')
-	return err
-
-def hg_push(ui, repo, **opts):
-	w = uiwrap(ui)
-	ui.quiet = False
-	ui.verbose = True
-	err = hg_commands.push(ui, repo, **opts)
-	for line in w.output().split('\n'):
-		if not isNoise(line):
-			ui.write(line + '\n')
-	return err
-
-def hg_commit(ui, repo, *pats, **opts):
-	return hg_commands.commit(ui, repo, *pats, **opts)
-
-#######################################################################
-# Mercurial precommit hook to disable commit except through this interface.
-
-commit_okay = False
-
-def precommithook(ui, repo, **opts):
-	if commit_okay:
-		return False  # False means okay.
-	ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
-	return True
-
-#######################################################################
-# @clnumber file pattern support
-
-# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
-
-match_repo = None
-match_ui = None
-match_orig = None
-
-def InstallMatch(ui, repo):
-	global match_repo
-	global match_ui
-	global match_orig
-
-	match_ui = ui
-	match_repo = repo
-
-	from mercurial import scmutil
-	match_orig = scmutil.match
-	scmutil.match = MatchAt
-
-def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
-	taken = []
-	files = []
-	pats = pats or []
-	opts = opts or {}
-	
-	for p in pats:
-		if p.startswith('@'):
-			taken.append(p)
-			clname = p[1:]
-			if clname == "default":
-				files = DefaultFiles(match_ui, match_repo, [])
-			else:
-				if not GoodCLName(clname):
-					raise hg_util.Abort("invalid CL name " + clname)
-				cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
-				if err != '':
-					raise hg_util.Abort("loading CL " + clname + ": " + err)
-				if not cl.files:
-					raise hg_util.Abort("no files in CL " + clname)
-				files = Add(files, cl.files)
-	pats = Sub(pats, taken) + ['path:'+f for f in files]
-
-	# work-around for http://selenic.com/hg/rev/785bbc8634f8
-	if not hasattr(ctx, 'match'):
-		ctx = ctx[None]
-	return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
-
-#######################################################################
-# Commands added by code review extension.
-
-# As of Mercurial 2.1 the commands are all required to return integer
-# exit codes, whereas earlier versions allowed returning arbitrary strings
-# to be printed as errors.  We wrap the old functions to make sure we
-# always return integer exit codes now.  Otherwise Mercurial dies
-# with a TypeError traceback (unsupported operand type(s) for &: 'str' and 'int').
-# Introduce a Python decorator to convert old functions to the new
-# stricter convention.
-
-def hgcommand(f):
-	def wrapped(ui, repo, *pats, **opts):
-		err = f(ui, repo, *pats, **opts)
-		if type(err) is int:
-			return err
-		if not err:
-			return 0
-		raise hg_util.Abort(err)
-	wrapped.__doc__ = f.__doc__
-	return wrapped
-
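A hypothetical command (not in the original file) showing the convention the decorator bridges: a string return is raised as hg_util.Abort, a false return becomes exit code 0, and integers pass through unchanged.

    @hgcommand
    def example_cmd(ui, repo, *pats, **opts):
        if not pats:
            return "no patterns given"   # old style: raised as hg_util.Abort by the wrapper
        return 0                         # new style: integer exit code, returned as-is
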
-#######################################################################
-# hg change
-
-@hgcommand
-def change(ui, repo, *pats, **opts):
-	"""create, edit or delete a change list
-
-	Create, edit or delete a change list.
-	A change list is a group of files to be reviewed and submitted together,
-	plus a textual description of the change.
-	Change lists are referred to by simple alphanumeric names.
-
-	Changes must be reviewed before they can be submitted.
-
-	In the absence of options, the change command opens the
-	change list for editing in the default editor.
-
-	Deleting a change with the -d or -D flag does not affect
-	the contents of the files listed in that change.  To revert
-	the files listed in a change, use
-
-		hg revert @123456
-
-	before running hg change -d 123456.
-	"""
-
-	if codereview_disabled:
-		return codereview_disabled
-	
-	dirty = {}
-	if len(pats) > 0 and GoodCLName(pats[0]):
-		name = pats[0]
-		if len(pats) != 1:
-			return "cannot specify CL name and file patterns"
-		pats = pats[1:]
-		cl, err = LoadCL(ui, repo, name, web=True)
-		if err != '':
-			return err
-		if not cl.local and (opts["stdin"] or not opts["stdout"]):
-			return "cannot change non-local CL " + name
-	else:
-		name = "new"
-		cl = CL("new")
-		if repo[None].branch() != "default":
-			return "cannot create CL outside default branch; switch with 'hg update default'"
-		dirty[cl] = True
-		files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
-
-	if opts["delete"] or opts["deletelocal"]:
-		if opts["delete"] and opts["deletelocal"]:
-			return "cannot use -d and -D together"
-		flag = "-d"
-		if opts["deletelocal"]:
-			flag = "-D"
-		if name == "new":
-			return "cannot use "+flag+" with file patterns"
-		if opts["stdin"] or opts["stdout"]:
-			return "cannot use "+flag+" with -i or -o"
-		if not cl.local:
-			return "cannot change non-local CL " + name
-		if opts["delete"]:
-			if cl.copied_from:
-				return "original author must delete CL; hg change -D will remove locally"
-			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
-			EditDesc(cl.name, closed=True, private=cl.private)
-		cl.Delete(ui, repo)
-		return
-
-	if opts["stdin"]:
-		s = sys.stdin.read()
-		clx, line, err = ParseCL(s, name)
-		if err != '':
-			return "error parsing change list: line %d: %s" % (line, err)
-		if clx.desc is not None:
-			cl.desc = clx.desc;
-			dirty[cl] = True
-		if clx.reviewer is not None:
-			cl.reviewer = clx.reviewer
-			dirty[cl] = True
-		if clx.cc is not None:
-			cl.cc = clx.cc
-			dirty[cl] = True
-		if clx.files is not None:
-			cl.files = clx.files
-			dirty[cl] = True
-		if clx.private != cl.private:
-			cl.private = clx.private
-			dirty[cl] = True
-
-	if not opts["stdin"] and not opts["stdout"]:
-		if name == "new":
-			cl.files = files
-		err = EditCL(ui, repo, cl)
-		if err != "":
-			return err
-		dirty[cl] = True
-
-	for d, _ in dirty.items():
-		name = d.name
-		d.Flush(ui, repo)
-		if name == "new":
-			d.Upload(ui, repo, quiet=True)
-
-	if opts["stdout"]:
-		ui.write(cl.EditorText())
-	elif opts["pending"]:
-		ui.write(cl.PendingText())
-	elif name == "new":
-		if ui.quiet:
-			ui.write(cl.name)
-		else:
-			ui.write("CL created: " + cl.url + "\n")
-	return
-
-#######################################################################
-# hg code-login (broken?)
-
-@hgcommand
-def code_login(ui, repo, **opts):
-	"""log in to code review server
-
-	Logs in to the code review server, saving a cookie in
-	a file in your home directory.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	MySend(None)
-
-#######################################################################
-# hg clpatch / undo / release-apply / download
-# All concerned with applying or unapplying patches to the repository.
-
-@hgcommand
-def clpatch(ui, repo, clname, **opts):
-	"""import a patch from the code review server
-
-	Imports a patch from the code review server into the local client.
-	If the local client has already modified any of the files that the
-	patch modifies, this command will refuse to apply the patch.
-
-	Submitting an imported patch will keep the original author's
-	name as the Author: line but add your own name to a Committer: line.
-	"""
-	if repo[None].branch() != "default":
-		return "cannot run hg clpatch outside default branch"
-	return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
-
-@hgcommand
-def undo(ui, repo, clname, **opts):
-	"""undo the effect of a CL
-	
-	Creates a new CL that undoes an earlier CL.
-	After creating the CL, opens the CL text for editing so that
-	you can add the reason for the undo to the description.
-	"""
-	if repo[None].branch() != "default":
-		return "cannot run hg undo outside default branch"
-	return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
-
-@hgcommand
-def release_apply(ui, repo, clname, **opts):
-	"""apply a CL to the release branch
-
-	Creates a new CL copying a previously committed change
-	from the main branch to the release branch.
-	The current client must either be clean or already be in
-	the release branch.
-	
-	The release branch must be created by starting with a
-	clean client, disabling the code review plugin, and running:
-	
-		hg update weekly.YYYY-MM-DD
-		hg branch release-branch.rNN
-		hg commit -m 'create release-branch.rNN'
-		hg push --new-branch
-	
-	Then re-enable the code review plugin.
-	
-	People can test the release branch by running
-	
-		hg update release-branch.rNN
-	
-	in a clean client.  To return to the normal tree,
-	
-		hg update default
-	
-	Move changes since the weekly into the release branch 
-	using hg release-apply followed by the usual code review
-	process and hg submit.
-
-	When it comes time to tag the release, record the
-	final long-form tag of the release-branch.rNN
-	in the *default* branch's .hgtags file.  That is, run
-	
-		hg update default
-	
-	and then edit .hgtags as you would for a weekly.
-		
-	"""
-	c = repo[None]
-	if not releaseBranch:
-		return "no active release branches"
-	if c.branch() != releaseBranch:
-		if c.modified() or c.added() or c.removed():
-			raise hg_util.Abort("uncommitted local changes - cannot switch branches")
-		err = hg_clean(repo, releaseBranch)
-		if err:
-			return err
-	try:
-		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
-		if err:
-			raise hg_util.Abort(err)
-	except Exception, e:
-		hg_clean(repo, "default")
-		raise e
-	return None
-
-def rev2clname(rev):
-	# Extract CL name from revision description.
-	# The last line in the description that is a codereview URL is the real one.
-	# Earlier lines might be part of the user-written description.
-	all = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
-	if len(all) > 0:
-		return all[-1]
-	return ""
-
-undoHeader = """undo CL %s / %s
-
-<enter reason for undo>
-
-««« original CL description
-"""
-
-undoFooter = """
-»»»
-"""
-
-backportHeader = """[%s] %s
-
-««« CL %s / %s
-"""
-
-backportFooter = """
-»»»
-"""
-
-# Implementation of clpatch/undo.
-def clpatch_or_undo(ui, repo, clname, opts, mode):
-	if codereview_disabled:
-		return codereview_disabled
-
-	if mode == "undo" or mode == "backport":
-		# Find revision in Mercurial repository.
-		# Assume CL number is 7+ decimal digits.
-		# Otherwise is either change log sequence number (fewer decimal digits),
-		# hexadecimal hash, or tag name.
-		# Mercurial will fall over long before the change log
-		# sequence numbers get to be 7 digits long.
-		if re.match('^[0-9]{7,}$', clname):
-			found = False
-			for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
-				rev = repo[r]
-				# Last line with a code review URL is the actual review URL.
-				# Earlier ones might be part of the CL description.
-				n = rev2clname(rev)
-				if n == clname:
-					found = True
-					break
-			if not found:
-				return "cannot find CL %s in local repository" % clname
-		else:
-			rev = repo[clname]
-			if not rev:
-				return "unknown revision %s" % clname
-			clname = rev2clname(rev)
-			if clname == "":
-				return "cannot find CL name in revision description"
-		
-		# Create fresh CL and start with patch that would reverse the change.
-		vers = hg_node.short(rev.node())
-		cl = CL("new")
-		desc = str(rev.description())
-		if mode == "undo":
-			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
-		else:
-			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
-		v1 = vers
-		v0 = hg_node.short(rev.parents()[0].node())
-		if mode == "undo":
-			arg = v1 + ":" + v0
-		else:
-			vers = v0
-			arg = v0 + ":" + v1
-		patch = RunShell(["hg", "diff", "--git", "-r", arg])
-
-	else:  # clpatch
-		cl, vers, patch, err = DownloadCL(ui, repo, clname)
-		if err != "":
-			return err
-		if patch == emptydiff:
-			return "codereview issue %s has no diff" % clname
-
-	# find current hg version (hg identify)
-	ctx = repo[None]
-	parents = ctx.parents()
-	id = '+'.join([hg_node.short(p.node()) for p in parents])
-
-	# if version does not match the patch version,
-	# try to update the patch line numbers.
-	if vers != "" and id != vers:
-		# "vers in repo" gives the wrong answer
-		# on some versions of Mercurial.  Instead, do the actual
-		# lookup and catch the exception.
-		try:
-			repo[vers].description()
-		except:
-			return "local repository is out of date; sync to get %s" % (vers)
-		patch1, err = portPatch(repo, patch, vers, id)
-		if err != "":
-			if not opts["ignore_hgpatch_failure"]:
-				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
-		else:
-			patch = patch1
-	argv = ["hgpatch"]
-	if opts["no_incoming"] or mode == "backport":
-		argv += ["--checksync=false"]
-	try:
-		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
-	except:
-		return "hgpatch: " + ExceptionDetail() + "\nInstall hgpatch with:\n$ go get code.google.com/p/go.codereview/cmd/hgpatch\n"
-
-	out, err = cmd.communicate(patch)
-	if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
-		return "hgpatch failed"
-	cl.local = True
-	cl.files = out.strip().split()
-	if not cl.files and not opts["ignore_hgpatch_failure"]:
-		return "codereview issue %s has no changed files" % clname
-	files = ChangedFiles(ui, repo, [])
-	extra = Sub(cl.files, files)
-	if extra:
-		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
-	cl.Flush(ui, repo)
-	if mode == "undo":
-		err = EditCL(ui, repo, cl)
-		if err != "":
-			return "CL created, but error editing: " + err
-		cl.Flush(ui, repo)
-	else:
-		ui.write(cl.PendingText() + "\n")
-
-# portPatch rewrites patch from being a patch against
-# oldver to being a patch against newver.
-def portPatch(repo, patch, oldver, newver):
-	lines = patch.splitlines(True) # True = keep \n
-	delta = None
-	for i in range(len(lines)):
-		line = lines[i]
-		if line.startswith('--- a/'):
-			file = line[6:-1]
-			delta = fileDeltas(repo, file, oldver, newver)
-		if not delta or not line.startswith('@@ '):
-			continue
-		# @@ -x,y +z,w @@ means the patch chunk replaces
-		# the original file's line numbers x up to x+y with the
-		# line numbers z up to z+w in the new file.
-		# Find the delta from x in the original to the same
-		# line in the current version and add that delta to both
-		# x and z.
-		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
-		if not m:
-			return None, "error parsing patch line numbers"
-		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
-		d, err = lineDelta(delta, n1, len1)
-		if err != "":
-			return "", err
-		n1 += d
-		n2 += d
-		lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
-		
-	newpatch = ''.join(lines)
-	return newpatch, ""
-
-# fileDelta returns the line number deltas for the given file's
-# changes from oldver to newver.
-# The deltas are a list of (n, len, newdelta) triples that say
-# lines [n, n+len) were modified, and after that range the
-# line numbers are +newdelta from what they were before.
-def fileDeltas(repo, file, oldver, newver):
-	cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
-	data = RunShell(cmd, silent_ok=True)
-	deltas = []
-	for line in data.splitlines():
-		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
-		if not m:
-			continue
-		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
-		deltas.append((n1, len1, n2+len2-(n1+len1)))
-	return deltas
-
-# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
-# It returns an error if those lines were rewritten by the patch.
-def lineDelta(deltas, n, len):
-	d = 0
-	for (old, oldlen, newdelta) in deltas:
-		if old >= n+len:
-			break
-		if old+len > n:
-			return 0, "patch and recent changes conflict"
-		d = newdelta
-	return d, ""
-
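A hypothetical worked example of the hunk-shifting arithmetic above (numbers invented): if lines 5-7 of a file were replaced by 8 lines between oldver and newver, fileDeltas() yields the single triple (5, 3, 5), and a later hunk in the patch is shifted down by 5 lines.

    deltas = [(5, 3, 5)]                # lines [5,8) changed; following lines move down by 5
    d, err = lineDelta(deltas, 20, 4)   # a patch hunk covering old lines [20,24)
    assert (d, err) == (5, "")
    # portPatch() would rewrite "@@ -20,4 +22,4 @@" as "@@ -25,4 +27,4 @@".
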
-@hgcommand
-def download(ui, repo, clname, **opts):
-	"""download a change from the code review server
-
-	Download prints a description of the given change list
-	followed by its diff, downloaded from the code review server.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	cl, vers, patch, err = DownloadCL(ui, repo, clname)
-	if err != "":
-		return err
-	ui.write(cl.EditorText() + "\n")
-	ui.write(patch + "\n")
-	return
-
-#######################################################################
-# hg file
-
-@hgcommand
-def file(ui, repo, clname, pat, *pats, **opts):
-	"""assign files to or remove files from a change list
-
-	Assign files to or (with -d) remove files from a change list.
-
-	The -d option only removes files from the change list.
-	It does not edit them or remove them from the repository.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	pats = tuple([pat] + list(pats))
-	if not GoodCLName(clname):
-		return "invalid CL name " + clname
-
-	dirty = {}
-	cl, err = LoadCL(ui, repo, clname, web=False)
-	if err != '':
-		return err
-	if not cl.local:
-		return "cannot change non-local CL " + clname
-
-	files = ChangedFiles(ui, repo, pats)
-
-	if opts["delete"]:
-		oldfiles = Intersect(files, cl.files)
-		if oldfiles:
-			if not ui.quiet:
-				ui.status("# Removing files from CL.  To undo:\n")
-				ui.status("#	cd %s\n" % (repo.root))
-				for f in oldfiles:
-					ui.status("#	hg file %s %s\n" % (cl.name, f))
-			cl.files = Sub(cl.files, oldfiles)
-			cl.Flush(ui, repo)
-		else:
-			ui.status("no such files in CL")
-		return
-
-	if not files:
-		return "no such modified files"
-
-	files = Sub(files, cl.files)
-	taken = Taken(ui, repo)
-	warned = False
-	for f in files:
-		if f in taken:
-			if not warned and not ui.quiet:
-				ui.status("# Taking files from other CLs.  To undo:\n")
-				ui.status("#	cd %s\n" % (repo.root))
-				warned = True
-			ocl = taken[f]
-			if not ui.quiet:
-				ui.status("#	hg file %s %s\n" % (ocl.name, f))
-			if ocl not in dirty:
-				ocl.files = Sub(ocl.files, files)
-				dirty[ocl] = True
-	cl.files = Add(cl.files, files)
-	dirty[cl] = True
-	for d, _ in dirty.items():
-		d.Flush(ui, repo)
-	return
-
-#######################################################################
-# hg gofmt
-
-@hgcommand
-def gofmt(ui, repo, *pats, **opts):
-	"""apply gofmt to modified files
-
-	Applies gofmt to the modified files in the repository that match
-	the given patterns.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	files = ChangedExistingFiles(ui, repo, pats, opts)
-	files = gofmt_required(files)
-	if not files:
-		return "no modified go files"
-	cwd = os.getcwd()
-	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
-	try:
-		cmd = ["gofmt", "-l"]
-		if not opts["list"]:
-			cmd += ["-w"]
-		if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
-			raise hg_util.Abort("gofmt did not exit cleanly")
-	except hg_error.Abort, e:
-		raise
-	except:
-		raise hg_util.Abort("gofmt: " + ExceptionDetail())
-	return
-
-def gofmt_required(files):
-	return [f for f in files if (not f.startswith('test/') or f.startswith('test/bench/')) and f.endswith('.go')]
-
-#######################################################################
-# hg mail
-
-@hgcommand
-def mail(ui, repo, *pats, **opts):
-	"""mail a change for review
-
-	Uploads a patch to the code review server and then sends mail
-	to the reviewer and CC list asking for a review.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
-	if err != "":
-		return err
-	cl.Upload(ui, repo, gofmt_just_warn=True)
-	if not cl.reviewer:
-		# If no reviewer is listed, assign the review to defaultcc.
-		# This makes sure that it appears in the 
-		# codereview.appspot.com/user/defaultcc
-		# page, so that it doesn't get dropped on the floor.
-		if not defaultcc:
-			return "no reviewers listed in CL"
-		cl.cc = Sub(cl.cc, defaultcc)
-		cl.reviewer = defaultcc
-		cl.Flush(ui, repo)
-
-	if cl.files == []:
-		return "no changed files, not sending mail"
-
-	cl.Mail(ui, repo)		
-
-#######################################################################
-# hg p / hg pq / hg ps / hg pending
-
-@hgcommand
-def ps(ui, repo, *pats, **opts):
-	"""alias for hg p --short
-	"""
-	opts['short'] = True
-	return pending(ui, repo, *pats, **opts)
-
-@hgcommand
-def pq(ui, repo, *pats, **opts):
-	"""alias for hg p --quick
-	"""
-	opts['quick'] = True
-	return pending(ui, repo, *pats, **opts)
-
-@hgcommand
-def pending(ui, repo, *pats, **opts):
-	"""show pending changes
-
-	Lists pending changes followed by a list of unassigned but modified files.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	quick = opts.get('quick', False)
-	short = opts.get('short', False)
-	m = LoadAllCL(ui, repo, web=not quick and not short)
-	names = m.keys()
-	names.sort()
-	for name in names:
-		cl = m[name]
-		if short:
-			ui.write(name + "\t" + line1(cl.desc) + "\n")
-		else:
-			ui.write(cl.PendingText(quick=quick) + "\n")
-
-	if short:
-		return
-	files = DefaultFiles(ui, repo, [])
-	if len(files) > 0:
-		s = "Changed files not in any CL:\n"
-		for f in files:
-			s += "\t" + f + "\n"
-		ui.write(s)
-
-#######################################################################
-# hg submit
-
-def need_sync():
-	raise hg_util.Abort("local repository out of date; must sync before submit")
-
-@hgcommand
-def submit(ui, repo, *pats, **opts):
-	"""submit change to remote repository
-
-	Submits change to remote repository.
-	Bails out if the local repository is not in sync with the remote one.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	# We already called this on startup but sometimes Mercurial forgets.
-	set_mercurial_encoding_to_utf8()
-
-	if not opts["no_incoming"] and hg_incoming(ui, repo):
-		need_sync()
-
-	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
-	if err != "":
-		return err
-
-	user = None
-	if cl.copied_from:
-		user = cl.copied_from
-	userline = CheckContributor(ui, repo, user)
-	typecheck(userline, str)
-
-	about = ""
-	if cl.reviewer:
-		about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
-	if opts.get('tbr'):
-		tbr = SplitCommaSpace(opts.get('tbr'))
-		cl.reviewer = Add(cl.reviewer, tbr)
-		about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
-	if cl.cc:
-		about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
-
-	if not cl.reviewer:
-		return "no reviewers listed in CL"
-
-	if not cl.local:
-		return "cannot submit non-local CL"
-
-	# upload, to sync current patch and also get change number if CL is new.
-	if not cl.copied_from:
-		cl.Upload(ui, repo, gofmt_just_warn=True)
-
-	# check gofmt for real; allowed upload to warn in order to save CL.
-	cl.Flush(ui, repo)
-	CheckFormat(ui, repo, cl.files)
-
-	about += "%s%s\n" % (server_url_base, cl.name)
-
-	if cl.copied_from:
-		about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
-	typecheck(about, str)
-
-	if not cl.mailed and not cl.copied_from:		# in case this is TBR
-		cl.Mail(ui, repo)
-
-	# submit changes locally
-	message = cl.desc.rstrip() + "\n\n" + about
-	typecheck(message, str)
-
-	set_status("pushing " + cl.name + " to remote server")
-
-	if hg_outgoing(ui, repo):
-		raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
-	
-	old_heads = len(hg_heads(ui, repo).split())
-
-	global commit_okay
-	commit_okay = True
-	ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
-	commit_okay = False
-	if ret:
-		return "nothing changed"
-	node = repo["-1"].node()
-	# push to remote; if it fails for any reason, roll back
-	try:
-		new_heads = len(hg_heads(ui, repo).split())
-		if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
-			# Created new head, so we weren't up to date.
-			need_sync()
-
-		# Push changes to remote.  If it works, we're committed.  If not, roll back.
-		try:
-			hg_push(ui, repo)
-		except hg_error.Abort, e:
-			if e.message.find("push creates new heads") >= 0:
-				# Remote repository had changes we missed.
-				need_sync()
-			raise
-	except:
-		real_rollback()
-		raise
-
-	# We're committed. Upload final patch, close review, add commit message.
-	changeURL = hg_node.short(node)
-	url = ui.expandpath("default")
-	m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
-		"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
-	if m:
-		if m.group(1): # prj.googlecode.com/hg/ case
-			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
-		elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
-			changeURL = "http://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
-		elif m.group(4): # code.google.com/p/prj/ case
-			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
-		else:
-			print >>sys.stderr, "URL: ", url
-	else:
-		print >>sys.stderr, "URL: ", url
-	pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
-
-	# When posting, move reviewers to CC line,
-	# so that the issue stops showing up in their "My Issues" page.
-	PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
-
-	if not cl.copied_from:
-		EditDesc(cl.name, closed=True, private=cl.private)
-	cl.Delete(ui, repo)
-
-	c = repo[None]
-	if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
-		ui.write("switching from %s to default branch.\n" % releaseBranch)
-		err = hg_clean(repo, "default")
-		if err:
-			return err
-	return None
-
-#######################################################################
-# hg sync
-
-@hgcommand
-def sync(ui, repo, **opts):
-	"""synchronize with remote repository
-
-	Incorporates recent changes from the remote repository
-	into the local repository.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	if not opts["local"]:
-		err = hg_pull(ui, repo, update=True)
-		if err:
-			return err
-	sync_changes(ui, repo)
-
-def sync_changes(ui, repo):
-	# Look through recent change log descriptions to find
-	# potential references to http://.*/our-CL-number.
-	# Double-check them by looking at the Rietveld log.
-	for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
-		desc = repo[rev].description().strip()
-		for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
-			if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
-				ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
-				cl, err = LoadCL(ui, repo, clname, web=False)
-				if err != "":
-					ui.warn("loading CL %s: %s\n" % (clname, err))
-					continue
-				if not cl.copied_from:
-					EditDesc(cl.name, closed=True, private=cl.private)
-				cl.Delete(ui, repo)
-
-	# Remove files that are not modified from the CLs in which they appear.
-	all = LoadAllCL(ui, repo, web=False)
-	changed = ChangedFiles(ui, repo, [])
-	for cl in all.values():
-		extra = Sub(cl.files, changed)
-		if extra:
-			ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
-			for f in extra:
-				ui.warn("\t%s\n" % (f,))
-			cl.files = Sub(cl.files, extra)
-			cl.Flush(ui, repo)
-		if not cl.files:
-			if not cl.copied_from:
-				ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
-			else:
-				ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
-	return
-
-#######################################################################
-# hg upload
-
-@hgcommand
-def upload(ui, repo, name, **opts):
-	"""upload diffs to the code review server
-
-	Uploads the current modifications for a given change to the server.
-	"""
-	if codereview_disabled:
-		return codereview_disabled
-
-	repo.ui.quiet = True
-	cl, err = LoadCL(ui, repo, name, web=True)
-	if err != "":
-		return err
-	if not cl.local:
-		return "cannot upload non-local change"
-	cl.Upload(ui, repo)
-	print "%s%s\n" % (server_url_base, cl.name)
-	return
-
-#######################################################################
-# Table of commands, supplied to Mercurial for installation.
-
-review_opts = [
-	('r', 'reviewer', '', 'add reviewer'),
-	('', 'cc', '', 'add cc'),
-	('', 'tbr', '', 'add future reviewer'),
-	('m', 'message', '', 'change description (for new change)'),
-]
-
-cmdtable = {
-	# The ^ means to show this command in the help text that
-	# is printed when running hg with no arguments.
-	"^change": (
-		change,
-		[
-			('d', 'delete', None, 'delete existing change list'),
-			('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
-			('i', 'stdin', None, 'read change list from standard input'),
-			('o', 'stdout', None, 'print change list to standard output'),
-			('p', 'pending', None, 'print pending summary to standard output'),
-		],
-		"[-d | -D] [-i] [-o] change# or FILE ..."
-	),
-	"^clpatch": (
-		clpatch,
-		[
-			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
-			('', 'no_incoming', None, 'disable check for incoming changes'),
-		],
-		"change#"
-	),
-	# Would prefer to call this codereview-login, but then
-	# hg help codereview prints the help for this command
-	# instead of the help for the extension.
-	"code-login": (
-		code_login,
-		[],
-		"",
-	),
-	"^download": (
-		download,
-		[],
-		"change#"
-	),
-	"^file": (
-		file,
-		[
-			('d', 'delete', None, 'delete files from change list (but not repository)'),
-		],
-		"[-d] change# FILE ..."
-	),
-	"^gofmt": (
-		gofmt,
-		[
-			('l', 'list', None, 'list files that would change, but do not edit them'),
-		],
-		"FILE ..."
-	),
-	"^pending|p": (
-		pending,
-		[
-			('s', 'short', False, 'show short result form'),
-			('', 'quick', False, 'do not consult codereview server'),
-		],
-		"[FILE ...]"
-	),
-	"^ps": (
-		ps,
-		[],
-		"[FILE ...]"
-	),
-	"^pq": (
-		pq,
-		[],
-		"[FILE ...]"
-	),
-	"^mail": (
-		mail,
-		review_opts + [
-		] + hg_commands.walkopts,
-		"[-r reviewer] [--cc cc] [change# | file ...]"
-	),
-	"^release-apply": (
-		release_apply,
-		[
-			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
-			('', 'no_incoming', None, 'disable check for incoming changes'),
-		],
-		"change#"
-	),
-	# TODO: release-start, release-tag, weekly-tag
-	"^submit": (
-		submit,
-		review_opts + [
-			('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
-		] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
-		"[-r reviewer] [--cc cc] [change# | file ...]"
-	),
-	"^sync": (
-		sync,
-		[
-			('', 'local', None, 'do not pull changes from remote repository')
-		],
-		"[--local]",
-	),
-	"^undo": (
-		undo,
-		[
-			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
-			('', 'no_incoming', None, 'disable check for incoming changes'),
-		],
-		"change#"
-	),
-	"^upload": (
-		upload,
-		[],
-		"change#"
-	),
-}
-
-#######################################################################
-# Mercurial extension initialization
-
-def norollback(*pats, **opts):
-	"""(disabled when using this extension)"""
-	raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
-
-codereview_init = False
-
-def reposetup(ui, repo):
-	global codereview_disabled
-	global defaultcc
-	
-	# reposetup gets called both for the local repository
-	# and also for any repository we are pulling or pushing to.
-	# Only initialize the first time.
-	global codereview_init
-	if codereview_init:
-		return
-	codereview_init = True
-
-	# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
-	root = ''
-	try:
-		root = repo.root
-	except:
-		# Yes, repo might not have root; see issue 959.
-		codereview_disabled = 'codereview disabled: repository has no root'
-		return
-	
-	repo_config_path = ''
-	p1 = root + '/lib/codereview/codereview.cfg'
-	p2 = root + '/codereview.cfg'
-	if os.access(p1, os.F_OK):
-		repo_config_path = p1
-	else:
-		repo_config_path = p2
-	try:
-		f = open(repo_config_path)
-		for line in f:
-			if line.startswith('defaultcc:'):
-				defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
-			if line.startswith('contributors:'):
-				global contributorsURL
-				contributorsURL = line[len('contributors:'):].strip()
-	except:
-		codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
-		return
-
-	remote = ui.config("paths", "default", "")
-	if remote.find("://") < 0:
-		raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
-
-	InstallMatch(ui, repo)
-	RietveldSetup(ui, repo)
-
-	# Disable the Mercurial commands that might change the repository.
-	# Only commands in this extension are supposed to do that.
-	ui.setconfig("hooks", "precommit.codereview", precommithook)
-
-	# Rollback removes an existing commit.  Don't do that either.
-	global real_rollback
-	real_rollback = repo.rollback
-	repo.rollback = norollback
-	
-
-#######################################################################
-# Wrappers around upload.py for interacting with Rietveld
-
-from HTMLParser import HTMLParser
-
-# HTML form parser
-class FormParser(HTMLParser):
-	def __init__(self):
-		self.map = {}
-		self.curtag = None
-		self.curdata = None
-		HTMLParser.__init__(self)
-	def handle_starttag(self, tag, attrs):
-		if tag == "input":
-			key = None
-			value = ''
-			for a in attrs:
-				if a[0] == 'name':
-					key = a[1]
-				if a[0] == 'value':
-					value = a[1]
-			if key is not None:
-				self.map[key] = value
-		if tag == "textarea":
-			key = None
-			for a in attrs:
-				if a[0] == 'name':
-					key = a[1]
-			if key is not None:
-				self.curtag = key
-				self.curdata = ''
-	def handle_endtag(self, tag):
-		if tag == "textarea" and self.curtag is not None:
-			self.map[self.curtag] = self.curdata
-			self.curtag = None
-			self.curdata = None
-	def handle_charref(self, name):
-		self.handle_data(unichr(int(name)))
-	def handle_entityref(self, name):
-		import htmlentitydefs
-		if name in htmlentitydefs.entitydefs:
-			self.handle_data(htmlentitydefs.entitydefs[name])
-		else:
-			self.handle_data("&" + name + ";")
-	def handle_data(self, data):
-		if self.curdata is not None:
-			self.curdata += data
-
-def JSONGet(ui, path):
-	try:
-		data = MySend(path, force_auth=False)
-		typecheck(data, str)
-		d = fix_json(json.loads(data))
-	except:
-		ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
-		return None
-	return d
-
-# Clean up json parser output to match our expectations:
-#   * all strings are UTF-8-encoded str, not unicode.
-#   * missing fields are missing, not None,
-#     so that d.get("foo", defaultvalue) works.
-def fix_json(x):
-	if type(x) in [str, int, float, bool, type(None)]:
-		pass
-	elif type(x) is unicode:
-		x = x.encode("utf-8")
-	elif type(x) is list:
-		for i in range(len(x)):
-			x[i] = fix_json(x[i])
-	elif type(x) is dict:
-		todel = []
-		for k in x:
-			if x[k] is None:
-				todel.append(k)
-			else:
-				x[k] = fix_json(x[k])
-		for k in todel:
-			del x[k]
-	else:
-		raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
-	if type(x) is str:
-		x = x.replace('\r\n', '\n')
-	return x
-
-def IsRietveldSubmitted(ui, clname, hex):
-	dict = JSONGet(ui, "/api/" + clname + "?messages=true")
-	if dict is None:
-		return False
-	for msg in dict.get("messages", []):
-		text = msg.get("text", "")
-		m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
-		if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
-			return True
-	return False
-
-def IsRietveldMailed(cl):
-	for msg in cl.dict.get("messages", []):
-		if msg.get("text", "").find("I'd like you to review this change") >= 0:
-			return True
-	return False
-
-def DownloadCL(ui, repo, clname):
-	set_status("downloading CL " + clname)
-	cl, err = LoadCL(ui, repo, clname, web=True)
-	if err != "":
-		return None, None, None, "error loading CL %s: %s" % (clname, err)
-
-	# Find most recent diff
-	diffs = cl.dict.get("patchsets", [])
-	if not diffs:
-		return None, None, None, "CL has no patch sets"
-	patchid = diffs[-1]
-
-	patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
-	if patchset is None:
-		return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
-	if patchset.get("patchset", 0) != patchid:
-		return None, None, None, "malformed patchset information"
-	
-	vers = ""
-	msg = patchset.get("message", "").split()
-	if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
-		vers = msg[2]
-	diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
-
-	diffdata = MySend(diff, force_auth=False)
-	
-	# Print warning if email is not in CONTRIBUTORS file.
-	email = cl.dict.get("owner_email", "")
-	if not email:
-		return None, None, None, "cannot find owner for %s" % (clname)
-	him = FindContributor(ui, repo, email)
-	me = FindContributor(ui, repo, None)
-	if him == me:
-		cl.mailed = IsRietveldMailed(cl)
-	else:
-		cl.copied_from = email
-
-	return cl, vers, diffdata, ""
-
-def MySend(request_path, payload=None,
-		content_type="application/octet-stream",
-		timeout=None, force_auth=True,
-		**kwargs):
-	"""Run MySend1 maybe twice, because Rietveld is unreliable."""
-	try:
-		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
-	except Exception, e:
-		if type(e) != urllib2.HTTPError or e.code != 500:	# only retry on HTTP 500 error
-			raise
-		print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
-		time.sleep(2)
-		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
-
-# Like upload.py Send but only authenticates when the
-# redirect is to www.google.com/accounts.  This keeps
-# unnecessary redirects from happening during testing.
-def MySend1(request_path, payload=None,
-				content_type="application/octet-stream",
-				timeout=None, force_auth=True,
-				**kwargs):
-	"""Sends an RPC and returns the response.
-
-	Args:
-		request_path: The path to send the request to, eg /api/appversion/create.
-		payload: The body of the request, or None to send an empty request.
-		content_type: The Content-Type header to use.
-		timeout: timeout in seconds; default None i.e. no timeout.
-			(Note: for large requests on OS X, the timeout doesn't work right.)
-		kwargs: Any keyword arguments are converted into query string parameters.
-
-	Returns:
-		The response body, as a string.
-	"""
-	# TODO: Don't require authentication.  Let the server say
-	# whether it is necessary.
-	global rpc
-	if rpc == None:
-		rpc = GetRpcServer(upload_options)
-	self = rpc
-	if not self.authenticated and force_auth:
-		self._Authenticate()
-	if request_path is None:
-		return
-
-	old_timeout = socket.getdefaulttimeout()
-	socket.setdefaulttimeout(timeout)
-	try:
-		tries = 0
-		while True:
-			tries += 1
-			args = dict(kwargs)
-			url = "http://%s%s" % (self.host, request_path)
-			if args:
-				url += "?" + urllib.urlencode(args)
-			req = self._CreateRequest(url=url, data=payload)
-			req.add_header("Content-Type", content_type)
-			try:
-				f = self.opener.open(req)
-				response = f.read()
-				f.close()
-				# Translate \r\n into \n, because Rietveld doesn't.
-				response = response.replace('\r\n', '\n')
-				# who knows what urllib will give us
-				if type(response) == unicode:
-					response = response.encode("utf-8")
-				typecheck(response, str)
-				return response
-			except urllib2.HTTPError, e:
-				if tries > 3:
-					raise
-				elif e.code == 401:
-					self._Authenticate()
-				elif e.code == 302:
-					loc = e.info()["location"]
-					if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
-						return ''
-					self._Authenticate()
-				else:
-					raise
-	finally:
-		socket.setdefaulttimeout(old_timeout)
-
-def GetForm(url):
-	f = FormParser()
-	f.feed(ustr(MySend(url)))	# f.feed wants unicode
-	f.close()
-	# convert back to utf-8 to restore sanity
-	m = {}
-	for k,v in f.map.items():
-		m[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
-	return m
-
-def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
-	set_status("uploading change to description")
-	form_fields = GetForm("/" + issue + "/edit")
-	if subject is not None:
-		form_fields['subject'] = subject
-	if desc is not None:
-		form_fields['description'] = desc
-	if reviewers is not None:
-		form_fields['reviewers'] = reviewers
-	if cc is not None:
-		form_fields['cc'] = cc
-	if closed:
-		form_fields['closed'] = "checked"
-	if private:
-		form_fields['private'] = "checked"
-	ctype, body = EncodeMultipartFormData(form_fields.items(), [])
-	response = MySend("/" + issue + "/edit", body, content_type=ctype)
-	if response != "":
-		print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
-		sys.exit(2)
-
-def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
-	set_status("uploading message")
-	form_fields = GetForm("/" + issue + "/publish")
-	if reviewers is not None:
-		form_fields['reviewers'] = reviewers
-	if cc is not None:
-		form_fields['cc'] = cc
-	if send_mail:
-		form_fields['send_mail'] = "checked"
-	else:
-		del form_fields['send_mail']
-	if subject is not None:
-		form_fields['subject'] = subject
-	form_fields['message'] = message
-	
-	form_fields['message_only'] = '1'	# Don't include draft comments
-	if reviewers is not None or cc is not None:
-		form_fields['message_only'] = ''	# Must set '' in order to override cc/reviewer
-	ctype = "applications/x-www-form-urlencoded"
-	body = urllib.urlencode(form_fields)
-	response = MySend("/" + issue + "/publish", body, content_type=ctype)
-	if response != "":
-		print response
-		sys.exit(2)
-
-class opt(object):
-	pass
-
-def RietveldSetup(ui, repo):
-	global force_google_account
-	global rpc
-	global server
-	global server_url_base
-	global upload_options
-	global verbosity
-
-	if not ui.verbose:
-		verbosity = 0
-
-	# Config options.
-	x = ui.config("codereview", "server")
-	if x is not None:
-		server = x
-
-	# TODO(rsc): Take from ui.username?
-	email = None
-	x = ui.config("codereview", "email")
-	if x is not None:
-		email = x
-
-	server_url_base = "http://" + server + "/"
-
-	testing = ui.config("codereview", "testing")
-	force_google_account = ui.configbool("codereview", "force_google_account", False)
-
-	upload_options = opt()
-	upload_options.email = email
-	upload_options.host = None
-	upload_options.verbose = 0
-	upload_options.description = None
-	upload_options.description_file = None
-	upload_options.reviewers = None
-	upload_options.cc = None
-	upload_options.message = None
-	upload_options.issue = None
-	upload_options.download_base = False
-	upload_options.revision = None
-	upload_options.send_mail = False
-	upload_options.vcs = None
-	upload_options.server = server
-	upload_options.save_cookies = True
-
-	if testing:
-		upload_options.save_cookies = False
-		upload_options.email = "test@example.com"
-
-	rpc = None
-	
-	global releaseBranch
-	tags = repo.branchtags().keys()
-	if 'release-branch.go10' in tags:
-		# NOTE(rsc): This tags.sort is going to get the wrong
-		# answer when comparing release-branch.go9 with
-		# release-branch.go10.  It will be a while before we care.
-		raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
-	tags.sort()
-	for t in tags:
-		if t.startswith('release-branch.go'):
-			releaseBranch = t			
-
-#######################################################################
-# http://codereview.appspot.com/static/upload.py, heavily edited.
-
-#!/usr/bin/env python
-#
-# Copyright 2007 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#	http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tool for uploading diffs from a version control system to the codereview app.
-
-Usage summary: upload.py [options] [-- diff_options]
-
-Diff options are passed to the diff command of the underlying system.
-
-Supported version control systems:
-	Git
-	Mercurial
-	Subversion
-
-It is important for Git/Mercurial users to specify a tree/node/branch to diff
-against by using the '--rev' option.
-"""
-# This code is derived from appcfg.py in the App Engine SDK (open source),
-# and from ASPN recipe #146306.
-
-import cookielib
-import getpass
-import logging
-import mimetypes
-import optparse
-import os
-import re
-import socket
-import subprocess
-import sys
-import urllib
-import urllib2
-import urlparse
-
-# The md5 module was deprecated in Python 2.5.
-try:
-	from hashlib import md5
-except ImportError:
-	from md5 import md5
-
-try:
-	import readline
-except ImportError:
-	pass
-
-# The logging verbosity:
-#  0: Errors only.
-#  1: Status messages.
-#  2: Info logs.
-#  3: Debug logs.
-verbosity = 1
-
-# Max size of patch or base file.
-MAX_UPLOAD_SIZE = 900 * 1024
-
-# whitelist for non-binary filetypes which do not start with "text/"
-# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
-TEXT_MIMETYPES = [
-	'application/javascript',
-	'application/x-javascript',
-	'application/x-freemind'
-]
-
-def GetEmail(prompt):
-	"""Prompts the user for their email address and returns it.
-
-	The last used email address is saved to a file and offered up as a suggestion
-	to the user. If the user presses enter without typing in anything the last
-	used email address is used. If the user enters a new address, it is saved
-	for next time we prompt.
-
-	"""
-	last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
-	last_email = ""
-	if os.path.exists(last_email_file_name):
-		try:
-			last_email_file = open(last_email_file_name, "r")
-			last_email = last_email_file.readline().strip("\n")
-			last_email_file.close()
-			prompt += " [%s]" % last_email
-		except IOError, e:
-			pass
-	email = raw_input(prompt + ": ").strip()
-	if email:
-		try:
-			last_email_file = open(last_email_file_name, "w")
-			last_email_file.write(email)
-			last_email_file.close()
-		except IOError, e:
-			pass
-	else:
-		email = last_email
-	return email
-
-
-def StatusUpdate(msg):
-	"""Print a status message to stdout.
-
-	If 'verbosity' is greater than 0, print the message.
-
-	Args:
-		msg: The string to print.
-	"""
-	if verbosity > 0:
-		print msg
-
-
-def ErrorExit(msg):
-	"""Print an error message to stderr and exit."""
-	print >>sys.stderr, msg
-	sys.exit(1)
-
-
-class ClientLoginError(urllib2.HTTPError):
-	"""Raised to indicate there was an error authenticating with ClientLogin."""
-
-	def __init__(self, url, code, msg, headers, args):
-		urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
-		self.args = args
-		self.reason = args["Error"]
-
-
-class AbstractRpcServer(object):
-	"""Provides a common interface for a simple RPC server."""
-
-	def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
-		"""Creates a new HttpRpcServer.
-
-		Args:
-			host: The host to send requests to.
-			auth_function: A function that takes no arguments and returns an
-				(email, password) tuple when called. Will be called if authentication
-				is required.
-			host_override: The host header to send to the server (defaults to host).
-			extra_headers: A dict of extra headers to append to every request.
-			save_cookies: If True, save the authentication cookies to local disk.
-				If False, use an in-memory cookiejar instead.  Subclasses must
-				implement this functionality.  Defaults to False.
-		"""
-		self.host = host
-		self.host_override = host_override
-		self.auth_function = auth_function
-		self.authenticated = False
-		self.extra_headers = extra_headers
-		self.save_cookies = save_cookies
-		self.opener = self._GetOpener()
-		if self.host_override:
-			logging.info("Server: %s; Host: %s", self.host, self.host_override)
-		else:
-			logging.info("Server: %s", self.host)
-
-	def _GetOpener(self):
-		"""Returns an OpenerDirector for making HTTP requests.
-
-		Returns:
-			A urllib2.OpenerDirector object.
-		"""
-		raise NotImplementedError()
-
-	def _CreateRequest(self, url, data=None):
-		"""Creates a new urllib request."""
-		logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
-		req = urllib2.Request(url, data=data)
-		if self.host_override:
-			req.add_header("Host", self.host_override)
-		for key, value in self.extra_headers.iteritems():
-			req.add_header(key, value)
-		return req
-
-	def _GetAuthToken(self, email, password):
-		"""Uses ClientLogin to authenticate the user, returning an auth token.
-
-		Args:
-			email:    The user's email address
-			password: The user's password
-
-		Raises:
-			ClientLoginError: If there was an error authenticating with ClientLogin.
-			HTTPError: If there was some other form of HTTP error.
-
-		Returns:
-			The authentication token returned by ClientLogin.
-		"""
-		account_type = "GOOGLE"
-		if self.host.endswith(".google.com") and not force_google_account:
-			# Needed for use inside Google.
-			account_type = "HOSTED"
-		req = self._CreateRequest(
-				url="https://www.google.com/accounts/ClientLogin",
-				data=urllib.urlencode({
-						"Email": email,
-						"Passwd": password,
-						"service": "ah",
-						"source": "rietveld-codereview-upload",
-						"accountType": account_type,
-				}),
-		)
-		try:
-			response = self.opener.open(req)
-			response_body = response.read()
-			response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
-			return response_dict["Auth"]
-		except urllib2.HTTPError, e:
-			if e.code == 403:
-				body = e.read()
-				response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
-				raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
-			else:
-				raise
-
-	def _GetAuthCookie(self, auth_token):
-		"""Fetches authentication cookies for an authentication token.
-
-		Args:
-			auth_token: The authentication token returned by ClientLogin.
-
-		Raises:
-			HTTPError: If there was an error fetching the authentication cookies.
-		"""
-		# This is a dummy value to allow us to identify when we're successful.
-		continue_location = "http://localhost/"
-		args = {"continue": continue_location, "auth": auth_token}
-		req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
-		try:
-			response = self.opener.open(req)
-		except urllib2.HTTPError, e:
-			response = e
-		if (response.code != 302 or
-				response.info()["location"] != continue_location):
-			raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
-		self.authenticated = True
-
-	def _Authenticate(self):
-		"""Authenticates the user.
-
-		The authentication process works as follows:
-		1) We get a username and password from the user
-		2) We use ClientLogin to obtain an AUTH token for the user
-				(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
-		3) We pass the auth token to /_ah/login on the server to obtain an
-				authentication cookie. If login was successful, it tries to redirect
-				us to the URL we provided.
-
-		If we attempt to access the upload API without first obtaining an
-		authentication cookie, it returns a 401 response (or a 302) and
-		directs us to authenticate ourselves with ClientLogin.
-		"""
-		for i in range(3):
-			credentials = self.auth_function()
-			try:
-				auth_token = self._GetAuthToken(credentials[0], credentials[1])
-			except ClientLoginError, e:
-				if e.reason == "BadAuthentication":
-					print >>sys.stderr, "Invalid username or password."
-					continue
-				if e.reason == "CaptchaRequired":
-					print >>sys.stderr, (
-						"Please go to\n"
-						"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
-						"and verify you are a human.  Then try again.")
-					break
-				if e.reason == "NotVerified":
-					print >>sys.stderr, "Account not verified."
-					break
-				if e.reason == "TermsNotAgreed":
-					print >>sys.stderr, "User has not agreed to TOS."
-					break
-				if e.reason == "AccountDeleted":
-					print >>sys.stderr, "The user account has been deleted."
-					break
-				if e.reason == "AccountDisabled":
-					print >>sys.stderr, "The user account has been disabled."
-					break
-				if e.reason == "ServiceDisabled":
-					print >>sys.stderr, "The user's access to the service has been disabled."
-					break
-				if e.reason == "ServiceUnavailable":
-					print >>sys.stderr, "The service is not available; try again later."
-					break
-				raise
-			self._GetAuthCookie(auth_token)
-			return
-
-	def Send(self, request_path, payload=None,
-					content_type="application/octet-stream",
-					timeout=None,
-					**kwargs):
-		"""Sends an RPC and returns the response.
-
-		Args:
-			request_path: The path to send the request to, eg /api/appversion/create.
-			payload: The body of the request, or None to send an empty request.
-			content_type: The Content-Type header to use.
-			timeout: timeout in seconds; default None i.e. no timeout.
-				(Note: for large requests on OS X, the timeout doesn't work right.)
-			kwargs: Any keyword arguments are converted into query string parameters.
-
-		Returns:
-			The response body, as a string.
-		"""
-		# TODO: Don't require authentication.  Let the server say
-		# whether it is necessary.
-		if not self.authenticated:
-			self._Authenticate()
-
-		old_timeout = socket.getdefaulttimeout()
-		socket.setdefaulttimeout(timeout)
-		try:
-			tries = 0
-			while True:
-				tries += 1
-				args = dict(kwargs)
-				url = "http://%s%s" % (self.host, request_path)
-				if args:
-					url += "?" + urllib.urlencode(args)
-				req = self._CreateRequest(url=url, data=payload)
-				req.add_header("Content-Type", content_type)
-				try:
-					f = self.opener.open(req)
-					response = f.read()
-					f.close()
-					return response
-				except urllib2.HTTPError, e:
-					if tries > 3:
-						raise
-					elif e.code == 401 or e.code == 302:
-						self._Authenticate()
-					else:
-						raise
-		finally:
-			socket.setdefaulttimeout(old_timeout)
-
-
-class HttpRpcServer(AbstractRpcServer):
-	"""Provides a simplified RPC-style interface for HTTP requests."""
-
-	def _Authenticate(self):
-		"""Save the cookie jar after authentication."""
-		super(HttpRpcServer, self)._Authenticate()
-		if self.save_cookies:
-			StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
-			self.cookie_jar.save()
-
-	def _GetOpener(self):
-		"""Returns an OpenerDirector that supports cookies and ignores redirects.
-
-		Returns:
-			A urllib2.OpenerDirector object.
-		"""
-		opener = urllib2.OpenerDirector()
-		opener.add_handler(urllib2.ProxyHandler())
-		opener.add_handler(urllib2.UnknownHandler())
-		opener.add_handler(urllib2.HTTPHandler())
-		opener.add_handler(urllib2.HTTPDefaultErrorHandler())
-		opener.add_handler(urllib2.HTTPSHandler())
-		opener.add_handler(urllib2.HTTPErrorProcessor())
-		if self.save_cookies:
-			self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
-			self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
-			if os.path.exists(self.cookie_file):
-				try:
-					self.cookie_jar.load()
-					self.authenticated = True
-					StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
-				except (cookielib.LoadError, IOError):
-					# Failed to load cookies - just ignore them.
-					pass
-			else:
-				# Create an empty cookie file with mode 600
-				fd = os.open(self.cookie_file, os.O_CREAT, 0600)
-				os.close(fd)
-			# Always chmod the cookie file
-			os.chmod(self.cookie_file, 0600)
-		else:
-			# Don't save cookies across runs of upload.py.
-			self.cookie_jar = cookielib.CookieJar()
-		opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
-		return opener
-
-
-def GetRpcServer(options):
-	"""Returns an instance of an AbstractRpcServer.
-
-	Returns:
-		A new AbstractRpcServer, on which RPC calls can be made.
-	"""
-
-	rpc_server_class = HttpRpcServer
-
-	def GetUserCredentials():
-		"""Prompts the user for a username and password."""
-		# Disable status prints so they don't obscure the password prompt.
-		global global_status
-		st = global_status
-		global_status = None
-
-		email = options.email
-		if email is None:
-			email = GetEmail("Email (login for uploading to %s)" % options.server)
-		password = getpass.getpass("Password for %s: " % email)
-
-		# Put status back.
-		global_status = st
-		return (email, password)
-
-	# If this is the dev_appserver, use fake authentication.
-	host = (options.host or options.server).lower()
-	if host == "localhost" or host.startswith("localhost:"):
-		email = options.email
-		if email is None:
-			email = "test@example.com"
-			logging.info("Using debug user %s.  Override with --email" % email)
-		server = rpc_server_class(
-				options.server,
-				lambda: (email, "password"),
-				host_override=options.host,
-				extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
-				save_cookies=options.save_cookies)
-		# Don't try to talk to ClientLogin.
-		server.authenticated = True
-		return server
-
-	return rpc_server_class(options.server, GetUserCredentials,
-		host_override=options.host, save_cookies=options.save_cookies)
-
-
-def EncodeMultipartFormData(fields, files):
-	"""Encode form fields for multipart/form-data.
-
-	Args:
-		fields: A sequence of (name, value) elements for regular form fields.
-		files: A sequence of (name, filename, value) elements for data to be
-					uploaded as files.
-	Returns:
-		(content_type, body) ready for httplib.HTTP instance.
-
-	Source:
-		http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
-	"""
-	BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
-	CRLF = '\r\n'
-	lines = []
-	for (key, value) in fields:
-		typecheck(key, str)
-		typecheck(value, str)
-		lines.append('--' + BOUNDARY)
-		lines.append('Content-Disposition: form-data; name="%s"' % key)
-		lines.append('')
-		lines.append(value)
-	for (key, filename, value) in files:
-		typecheck(key, str)
-		typecheck(filename, str)
-		typecheck(value, str)
-		lines.append('--' + BOUNDARY)
-		lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
-		lines.append('Content-Type: %s' % GetContentType(filename))
-		lines.append('')
-		lines.append(value)
-	lines.append('--' + BOUNDARY + '--')
-	lines.append('')
-	body = CRLF.join(lines)
-	content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
-	return content_type, body
-
-
-def GetContentType(filename):
-	"""Helper to guess the content-type from the filename."""
-	return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
-
-
-# Use a shell for subcommands on Windows to get a PATH search.
-use_shell = sys.platform.startswith("win")
-
-def RunShellWithReturnCode(command, print_output=False,
-		universal_newlines=True, env=os.environ):
-	"""Executes a command and returns the output from stdout and the return code.
-
-	Args:
-		command: Command to execute.
-		print_output: If True, the output is printed to stdout.
-			If False, both stdout and stderr are ignored.
-		universal_newlines: Use universal_newlines flag (default: True).
-
-	Returns:
-		Tuple (output, return code)
-	"""
-	logging.info("Running %s", command)
-	p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-		shell=use_shell, universal_newlines=universal_newlines, env=env)
-	if print_output:
-		output_array = []
-		while True:
-			line = p.stdout.readline()
-			if not line:
-				break
-			print line.strip("\n")
-			output_array.append(line)
-		output = "".join(output_array)
-	else:
-		output = p.stdout.read()
-	p.wait()
-	errout = p.stderr.read()
-	if print_output and errout:
-		print >>sys.stderr, errout
-	p.stdout.close()
-	p.stderr.close()
-	return output, p.returncode
-
-
-def RunShell(command, silent_ok=False, universal_newlines=True,
-		print_output=False, env=os.environ):
-	data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines, env)
-	if retcode:
-		ErrorExit("Got error status from %s:\n%s" % (command, data))
-	if not silent_ok and not data:
-		ErrorExit("No output from %s" % command)
-	return data
-
-
-class VersionControlSystem(object):
-	"""Abstract base class providing an interface to the VCS."""
-
-	def __init__(self, options):
-		"""Constructor.
-
-		Args:
-			options: Command line options.
-		"""
-		self.options = options
-
-	def GenerateDiff(self, args):
-		"""Return the current diff as a string.
-
-		Args:
-			args: Extra arguments to pass to the diff command.
-		"""
-		raise NotImplementedError(
-				"abstract method -- subclass %s must override" % self.__class__)
-
-	def GetUnknownFiles(self):
-		"""Return a list of files unknown to the VCS."""
-		raise NotImplementedError(
-				"abstract method -- subclass %s must override" % self.__class__)
-
-	def CheckForUnknownFiles(self):
-		"""Show an "are you sure?" prompt if there are unknown files."""
-		unknown_files = self.GetUnknownFiles()
-		if unknown_files:
-			print "The following files are not added to version control:"
-			for line in unknown_files:
-				print line
-			prompt = "Are you sure to continue?(y/N) "
-			answer = raw_input(prompt).strip()
-			if answer != "y":
-				ErrorExit("User aborted")
-
-	def GetBaseFile(self, filename):
-		"""Get the content of the upstream version of a file.
-
-		Returns:
-			A tuple (base_content, new_content, is_binary, status)
-				base_content: The contents of the base file.
-				new_content: For text files, this is empty.  For binary files, this is
-					the contents of the new file, since the diff output won't contain
-					information to reconstruct the current file.
-				is_binary: True iff the file is binary.
-				status: The status of the file.
-		"""
-
-		raise NotImplementedError(
-				"abstract method -- subclass %s must override" % self.__class__)
-
-
-	def GetBaseFiles(self, diff):
-		"""Helper that calls GetBase file for each file in the patch.
-
-		Returns:
-			A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
-			are retrieved based on lines that start with "Index:" or
-			"Property changes on:".
-		"""
-		files = {}
-		for line in diff.splitlines(True):
-			if line.startswith('Index:') or line.startswith('Property changes on:'):
-				unused, filename = line.split(':', 1)
-				# On Windows if a file has property changes its filename uses '\'
-				# instead of '/'.
-				filename = to_slash(filename.strip())
-				files[filename] = self.GetBaseFile(filename)
-		return files
-
-
-	def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
-											files):
-		"""Uploads the base files (and if necessary, the current ones as well)."""
-
-		def UploadFile(filename, file_id, content, is_binary, status, is_base):
-			"""Uploads a file to the server."""
-			set_status("uploading " + filename)
-			file_too_large = False
-			if is_base:
-				type = "base"
-			else:
-				type = "current"
-			if len(content) > MAX_UPLOAD_SIZE:
-				print ("Not uploading the %s file for %s because it's too large." %
-							(type, filename))
-				file_too_large = True
-				content = ""
-			checksum = md5(content).hexdigest()
-			if options.verbose > 0 and not file_too_large:
-				print "Uploading %s file for %s" % (type, filename)
-			url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
-			form_fields = [
-				("filename", filename),
-				("status", status),
-				("checksum", checksum),
-				("is_binary", str(is_binary)),
-				("is_current", str(not is_base)),
-			]
-			if file_too_large:
-				form_fields.append(("file_too_large", "1"))
-			if options.email:
-				form_fields.append(("user", options.email))
-			ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
-			response_body = rpc_server.Send(url, body, content_type=ctype)
-			if not response_body.startswith("OK"):
-				StatusUpdate("  --> %s" % response_body)
-				sys.exit(1)
-
-		# Don't want to spawn too many threads, nor do we want to
-		# hit Rietveld too hard, or it will start serving 500 errors.
-		# When 8 works, it's no better than 4, and sometimes 8 is
-		# too many for Rietveld to handle.
-		MAX_PARALLEL_UPLOADS = 4
-
-		sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
-		upload_threads = []
-		finished_upload_threads = []
-		
-		class UploadFileThread(threading.Thread):
-			def __init__(self, args):
-				threading.Thread.__init__(self)
-				self.args = args
-			def run(self):
-				UploadFile(*self.args)
-				finished_upload_threads.append(self)
-				sema.release()
-
-		def StartUploadFile(*args):
-			sema.acquire()
-			while len(finished_upload_threads) > 0:
-				t = finished_upload_threads.pop()
-				upload_threads.remove(t)
-				t.join()
-			t = UploadFileThread(args)
-			upload_threads.append(t)
-			t.start()
-
-		def WaitForUploads():			
-			for t in upload_threads:
-				t.join()
-
-		patches = dict()
-		[patches.setdefault(v, k) for k, v in patch_list]
-		for filename in patches.keys():
-			base_content, new_content, is_binary, status = files[filename]
-			file_id_str = patches.get(filename)
-			if file_id_str.find("nobase") != -1:
-				base_content = None
-				file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
-			file_id = int(file_id_str)
-			if base_content != None:
-				StartUploadFile(filename, file_id, base_content, is_binary, status, True)
-			if new_content != None:
-				StartUploadFile(filename, file_id, new_content, is_binary, status, False)
-		WaitForUploads()
-
-	def IsImage(self, filename):
-		"""Returns true if the filename has an image extension."""
-		mimetype =  mimetypes.guess_type(filename)[0]
-		if not mimetype:
-			return False
-		return mimetype.startswith("image/")
-
-	def IsBinary(self, filename):
-		"""Returns true if the guessed mimetyped isnt't in text group."""
-		mimetype = mimetypes.guess_type(filename)[0]
-		if not mimetype:
-			return False  # e.g. README, "real" binaries usually have an extension
-		# special case for text files which don't start with text/
-		if mimetype in TEXT_MIMETYPES:
-			return False
-		return not mimetype.startswith("text/")
-
-
-class FakeMercurialUI(object):
-	def __init__(self):
-		self.quiet = True
-		self.output = ''
-	
-	def write(self, *args, **opts):
-		self.output += ' '.join(args)
-	def copy(self):
-		return self
-	def status(self, *args, **opts):
-		pass
-
-	def formatter(self, topic, opts):
-		from mercurial.formatter import plainformatter
-		return plainformatter(self, topic, opts)
-	
-	def readconfig(self, *args, **opts):
-		pass
-	def expandpath(self, *args, **opts):
-		return global_ui.expandpath(*args, **opts)
-	def configitems(self, *args, **opts):
-		return global_ui.configitems(*args, **opts)
-	def config(self, *args, **opts):
-		return global_ui.config(*args, **opts)
-
-use_hg_shell = False	# set to True to shell out to hg always; slower
-
-class MercurialVCS(VersionControlSystem):
-	"""Implementation of the VersionControlSystem interface for Mercurial."""
-
-	def __init__(self, options, ui, repo):
-		super(MercurialVCS, self).__init__(options)
-		self.ui = ui
-		self.repo = repo
-		self.status = None
-		# Absolute path to repository (we can be in a subdir)
-		self.repo_dir = os.path.normpath(repo.root)
-		# Compute the subdir
-		cwd = os.path.normpath(os.getcwd())
-		assert cwd.startswith(self.repo_dir)
-		self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
-		if self.options.revision:
-			self.base_rev = self.options.revision
-		else:
-			mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
-			if not err and mqparent != "":
-				self.base_rev = mqparent
-			else:
-				out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
-				if not out:
-					# No revisions; use 0 to mean a repository with nothing.
-					out = "0:0"
-				self.base_rev = out.split(':')[1].strip()
-	def _GetRelPath(self, filename):
-		"""Get relative path of a file according to the current directory,
-		given its logical path in the repo."""
-		assert filename.startswith(self.subdir), (filename, self.subdir)
-		return filename[len(self.subdir):].lstrip(r"\/")
-
-	def GenerateDiff(self, extra_args):
-		# If no file specified, restrict to the current subdir
-		extra_args = extra_args or ["."]
-		cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
-		data = RunShell(cmd, silent_ok=True)
-		svndiff = []
-		filecount = 0
-		for line in data.splitlines():
-			m = re.match("diff --git a/(\S+) b/(\S+)", line)
-			if m:
-			# Modify the line to make it look as if it comes from svn diff.
-				# With this modification no changes on the server side are required
-				# to make upload.py work with Mercurial repos.
-				# NOTE: for proper handling of moved/copied files, we have to use
-				# the second filename.
-				filename = m.group(2)
-				svndiff.append("Index: %s" % filename)
-				svndiff.append("=" * 67)
-				filecount += 1
-				logging.info(line)
-			else:
-				svndiff.append(line)
-		if not filecount:
-			ErrorExit("No valid patches found in output from hg diff")
-		return "\n".join(svndiff) + "\n"
-
-	def GetUnknownFiles(self):
-		"""Return a list of files unknown to the VCS."""
-		args = []
-		status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
-				silent_ok=True)
-		unknown_files = []
-		for line in status.splitlines():
-			st, fn = line.split(" ", 1)
-			if st == "?":
-				unknown_files.append(fn)
-		return unknown_files
-
-	def get_hg_status(self, rev, path):
-		# We'd like to use 'hg status -C path', but that is buggy
-		# (see http://mercurial.selenic.com/bts/issue3023).
-		# Instead, run 'hg status -C' without a path
-		# and skim the output for the path we want.
-		if self.status is None:
-			if use_hg_shell:
-				out = RunShell(["hg", "status", "-C", "--rev", rev])
-			else:
-				fui = FakeMercurialUI()
-				ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
-				if ret:
-					raise hg_util.Abort(ret)
-				out = fui.output
-			self.status = out.splitlines()
-		for i in range(len(self.status)):
-			# line is
-			#	A path
-			#	M path
-			# etc
-			line = to_slash(self.status[i])
-			if line[2:] == path:
-				if i+1 < len(self.status) and self.status[i+1][:2] == '  ':
-					return self.status[i:i+2]
-				return self.status[i:i+1]
-		raise hg_util.Abort("no status for " + path)
-	
-	def GetBaseFile(self, filename):
-		set_status("inspecting " + filename)
-		# "hg status" and "hg cat" both take a path relative to the current subdir
-		# rather than to the repo root, but "hg diff" has given us the full path
-		# to the repo root.
-		base_content = ""
-		new_content = None
-		is_binary = False
-		oldrelpath = relpath = self._GetRelPath(filename)
-		out = self.get_hg_status(self.base_rev, relpath)
-		status, what = out[0].split(' ', 1)
-		if len(out) > 1 and status == "A" and what == relpath:
-			oldrelpath = out[1].strip()
-			status = "M"
-		if ":" in self.base_rev:
-			base_rev = self.base_rev.split(":", 1)[0]
-		else:
-			base_rev = self.base_rev
-		if status != "A":
-			if use_hg_shell:
-				base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
-			else:
-				base_content = str(self.repo[base_rev][oldrelpath].data())
-			is_binary = "\0" in base_content  # Mercurial's heuristic
-		if status != "R":
-			new_content = open(relpath, "rb").read()
-			is_binary = is_binary or "\0" in new_content
-		if is_binary and base_content and use_hg_shell:
-			# Fetch again without converting newlines
-			base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
-				silent_ok=True, universal_newlines=False)
-		if not is_binary or not self.IsImage(relpath):
-			new_content = None
-		return base_content, new_content, is_binary, status
-
-
-# NOTE: The SplitPatch function is duplicated in engine.py; keep them in sync.
-def SplitPatch(data):
-	"""Splits a patch into separate pieces for each file.
-
-	Args:
-		data: A string containing the output of svn diff.
-
-	Returns:
-		A list of 2-tuple (filename, text) where text is the svn diff output
-			pertaining to filename.
-	"""
-	patches = []
-	filename = None
-	diff = []
-	for line in data.splitlines(True):
-		new_filename = None
-		if line.startswith('Index:'):
-			unused, new_filename = line.split(':', 1)
-			new_filename = new_filename.strip()
-		elif line.startswith('Property changes on:'):
-			unused, temp_filename = line.split(':', 1)
-			# When a file is modified, paths use '/' between directories, however
-			# when a property is modified '\' is used on Windows.  Make them the same
-			# otherwise the file shows up twice.
-			temp_filename = to_slash(temp_filename.strip())
-			if temp_filename != filename:
-				# File has property changes but no modifications, create a new diff.
-				new_filename = temp_filename
-		if new_filename:
-			if filename and diff:
-				patches.append((filename, ''.join(diff)))
-			filename = new_filename
-			diff = [line]
-			continue
-		if diff is not None:
-			diff.append(line)
-	if filename and diff:
-		patches.append((filename, ''.join(diff)))
-	return patches
-
-
-def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
-	"""Uploads a separate patch for each file in the diff output.
-
-	Returns a list of [patch_key, filename] for each file.
-	"""
-	patches = SplitPatch(data)
-	rv = []
-	for patch in patches:
-		set_status("uploading patch for " + patch[0])
-		if len(patch[1]) > MAX_UPLOAD_SIZE:
-			print ("Not uploading the patch for " + patch[0] +
-				" because the file is too large.")
-			continue
-		form_fields = [("filename", patch[0])]
-		if not options.download_base:
-			form_fields.append(("content_upload", "1"))
-		files = [("data", "data.diff", patch[1])]
-		ctype, body = EncodeMultipartFormData(form_fields, files)
-		url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
-		print "Uploading patch for " + patch[0]
-		response_body = rpc_server.Send(url, body, content_type=ctype)
-		lines = response_body.splitlines()
-		if not lines or lines[0] != "OK":
-			StatusUpdate("  --> %s" % response_body)
-			sys.exit(1)
-		rv.append([lines[1], patch[0]])
-	return rv
diff --git a/lib/git/commit-msg.hook b/lib/git/commit-msg.hook
new file mode 100755
index 0000000..985016b
--- /dev/null
+++ b/lib/git/commit-msg.hook
@@ -0,0 +1,104 @@
+#!/bin/sh
+# From Gerrit Code Review 2.2.1
+#
+# Part of Gerrit Code Review (http://code.google.com/p/gerrit/)
+#
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+CHANGE_ID_AFTER="Bug|Issue"
+MSG="$1"
+
+# Check for, and add if missing, a unique Change-Id
+#
+add_ChangeId() {
+	clean_message=`sed -e '
+		/^diff --git a\/.*/{
+			s///
+			q
+		}
+		/^Signed-off-by:/d
+		/^#/d
+	' "$MSG" | git stripspace`
+	if test -z "$clean_message"
+	then
+		return
+	fi
+
+	if grep -i '^Change-Id:' "$MSG" >/dev/null
+	then
+		return
+	fi
+
+	id=`_gen_ChangeId`
+	perl -e '
+		$MSG = shift;
+		$id = shift;
+		$CHANGE_ID_AFTER = shift;
+
+		undef $/;
+		open(I, $MSG); $_ = <I>; close I;
+		s|^diff --git a/.*||ms;
+		s|^#.*$||mg;
+		exit unless $_;
+
+		@message = split /\n/;
+		$haveFooter = 0;
+		$startFooter = @message;
+		for($line = @message - 1; $line >= 0; $line--) {
+			$_ = $message[$line];
+
+			if (/^[a-zA-Z0-9-]+:/ && !m,^[a-z0-9-]+://,) {
+				$haveFooter++;
+				next;
+			}
+			next if /^[ []/;
+			$startFooter = $line if ($haveFooter && /^\r?$/);
+			last;
+		}
+
+		@footer = @message[$startFooter+1..@message];
+		@message = @message[0..$startFooter];
+		push(@footer, "") unless @footer;
+
+		for ($line = 0; $line < @footer; $line++) {
+			$_ = $footer[$line];
+			next if /^($CHANGE_ID_AFTER):/i;
+			last;
+		}
+		splice(@footer, $line, 0, "Change-Id: I$id");
+
+		$_ = join("\n", @message, @footer);
+		open(O, ">$MSG"); print O; close O;
+	' "$MSG" "$id" "$CHANGE_ID_AFTER"
+}
+_gen_ChangeIdInput() {
+	echo "tree `git write-tree`"
+	if parent=`git rev-parse HEAD^0 2>/dev/null`
+	then
+		echo "parent $parent"
+	fi
+	echo "author `git var GIT_AUTHOR_IDENT`"
+	echo "committer `git var GIT_COMMITTER_IDENT`"
+	echo
+	printf '%s' "$clean_message"
+}
+_gen_ChangeId() {
+	_gen_ChangeIdInput |
+	git hash-object -t commit --stdin
+}
+
+
+add_ChangeId
diff --git a/libre2.symbols b/libre2.symbols
index 1a9cae3..8308b64 100644
--- a/libre2.symbols
+++ b/libre2.symbols
@@ -6,10 +6,11 @@
 		# re2::StringPiece*
 		_ZN3re211StringPiece*;
 		_ZNK3re211StringPiece*;
-		# operator<<(std::ostream&, re2::StringPiece const&)
-		_ZlsRSoRKN3re211StringPieceE;
+		# re2::operator<<*
+		_ZN3re2ls*;
 		# re2::FilteredRE2*
 		_ZN3re211FilteredRE2*;
+		_ZNK3re211FilteredRE2*;
 	local:
 		*;
 };
diff --git a/libre2.symbols.darwin b/libre2.symbols.darwin
index 93eab3e..31e8c52 100644
--- a/libre2.symbols.darwin
+++ b/libre2.symbols.darwin
@@ -5,7 +5,8 @@
 # re2::StringPiece*
 __ZN3re211StringPiece*
 __ZNK3re211StringPiece*
-# operator<<(std::ostream&, re2::StringPiece const&)
-__ZlsRSoRKN3re211StringPieceE
+# re2::operator<<*
+__ZN3re2ls*
 # re2::FilteredRE2*
 __ZN3re211FilteredRE2*
+__ZNK3re211FilteredRE2*
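The two symbol maps above are widened in the same way: the single mangled name for operator<<(std::ostream&, re2::StringPiece const&) is replaced by the wildcard _ZN3re2ls* (with the extra leading underscore on Darwin), which matches every operator<< now defined in namespace re2, and _ZNK3re211FilteredRE2* additionally exports the const member functions of FilteredRE2. A minimal client sketch, illustrative only and not part of the patch, that resolves against those patterns when linked to the shared library (it assumes FilteredRE2 exposes the const accessor NumRegexps()):

// demo_symbols.cc -- illustrative only; exercises the newly exported patterns.
#include <iostream>

#include "re2/filtered_re2.h"
#include "re2/stringpiece.h"

int main() {
  re2::StringPiece sp("hello");
  std::cout << sp << "\n";              // re2::operator<<, matched by _ZN3re2ls*

  re2::FilteredRE2 f;
  std::cout << f.NumRegexps() << "\n";  // const member, matched by _ZNK3re211FilteredRE2*
  return 0;
}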
diff --git a/re2.pc b/re2.pc
new file mode 100644
index 0000000..d66cf51
--- /dev/null
+++ b/re2.pc
@@ -0,0 +1,10 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+includedir=@includedir@
+libdir=@libdir@
+
+Name: re2
+Description: RE2 is a fast, safe, thread-friendly regular expression engine.
+Version: 0.0.0
+Cflags: -std=c++11 -pthread -I${includedir}
+Libs: -pthread -L${libdir} -lre2
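The new re2.pc is a pkg-config template: @prefix@, @exec_prefix@, @includedir@ and @libdir@ are filled in at install time, after which consumers get the -std=c++11/-pthread flags and -lre2 without hard-coding paths. A minimal usage sketch, not part of the patch:

// demo_pc.cc -- build with: g++ -o demo_pc demo_pc.cc $(pkg-config --cflags --libs re2)
#include <iostream>
#include <string>

#include "re2/re2.h"

int main() {
  std::string word;
  int number = 0;
  if (RE2::FullMatch("ruby:1234", "(\\w+):(\\d+)", &word, &number))
    std::cout << word << " " << number << "\n";  // prints "ruby 1234"
  return 0;
}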
diff --git a/re2/Makefile b/re2/Makefile
deleted file mode 100644
index 8b13789..0000000
--- a/re2/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/re2/bitmap256.h b/re2/bitmap256.h
new file mode 100644
index 0000000..1abae99
--- /dev/null
+++ b/re2/bitmap256.h
@@ -0,0 +1,113 @@
+// Copyright 2016 The RE2 Authors.  All Rights Reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#ifndef RE2_BITMAP256_H_
+#define RE2_BITMAP256_H_
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+#include <stdint.h>
+#include <string.h>
+
+#include "util/util.h"
+#include "util/logging.h"
+
+namespace re2 {
+
+class Bitmap256 {
+ public:
+  Bitmap256() {
+    memset(words_, 0, sizeof words_);
+  }
+
+  // Tests the bit with index c.
+  bool Test(int c) const {
+    DCHECK_GE(c, 0);
+    DCHECK_LE(c, 255);
+
+    return (words_[c / 64] & (1ULL << (c % 64))) != 0;
+  }
+
+  // Sets the bit with index c.
+  void Set(int c) {
+    DCHECK_GE(c, 0);
+    DCHECK_LE(c, 255);
+
+    words_[c / 64] |= (1ULL << (c % 64));
+  }
+
+  // Finds the next non-zero bit with index >= c.
+  // Returns -1 if no such bit exists.
+  int FindNextSetBit(int c) const;
+
+ private:
+  // Finds the least significant non-zero bit in n.
+  static int FindLSBSet(uint64_t n) {
+    DCHECK_NE(n, 0);
+
+#if defined(__GNUC__)
+    return __builtin_ctzll(n);
+#elif defined(_MSC_VER) && defined(_M_X64)
+    unsigned long c;
+    _BitScanForward64(&c, n);
+    return static_cast<int>(c);
+#elif defined(_MSC_VER) && defined(_M_IX86)
+    unsigned long c;
+    if (static_cast<uint32_t>(n) != 0) {
+      _BitScanForward(&c, static_cast<uint32_t>(n));
+      return static_cast<int>(c);
+    } else {
+      _BitScanForward(&c, static_cast<uint32_t>(n >> 32));
+      return static_cast<int>(c) + 32;
+    }
+#else
+    int c = 63;
+    for (int shift = 1 << 5; shift != 0; shift >>= 1) {
+      uint64_t word = n << shift;
+      if (word != 0) {
+        n = word;
+        c -= shift;
+      }
+    }
+    return c;
+#endif
+  }
+
+  uint64_t words_[4];
+};
+
+int Bitmap256::FindNextSetBit(int c) const {
+  DCHECK_GE(c, 0);
+  DCHECK_LE(c, 255);
+
+  // Check the word that contains the bit. Mask out any lower bits.
+  int i = c / 64;
+  uint64_t word = words_[i] & (~0ULL << (c % 64));
+  if (word != 0)
+    return (i * 64) + FindLSBSet(word);
+
+  // Check any following words.
+  i++;
+  switch (i) {
+    case 1:
+      if (words_[1] != 0)
+        return (1 * 64) + FindLSBSet(words_[1]);
+      FALLTHROUGH_INTENDED;
+    case 2:
+      if (words_[2] != 0)
+        return (2 * 64) + FindLSBSet(words_[2]);
+      FALLTHROUGH_INTENDED;
+    case 3:
+      if (words_[3] != 0)
+        return (3 * 64) + FindLSBSet(words_[3]);
+      FALLTHROUGH_INTENDED;
+    default:
+      return -1;
+  }
+}
+
+}  // namespace re2
+
+#endif  // RE2_BITMAP256_H_
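A minimal usage sketch of the class above (illustrative only, not part of the change; assumes re2/bitmap256.h and its util/ dependencies are available): set a few bits, then iterate all set bits with FindNextSetBit, taking care never to call it with an index above 255.

    re2::Bitmap256 bits;
    bits.Set(0x41);    // 'A'
    bits.Set(0xC3);    // a UTF-8 lead byte
    for (int c = bits.FindNextSetBit(0); c >= 0;
         c = (c == 255) ? -1 : bits.FindNextSetBit(c + 1)) {
      // visits 0x41, then 0xC3
    }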
diff --git a/re2/bitstate.cc b/re2/bitstate.cc
index 518d642..6e1b44c 100644
--- a/re2/bitstate.cc
+++ b/re2/bitstate.cc
@@ -17,6 +17,13 @@
 // SearchBitState is a fast replacement for the NFA code on small
 // regexps and texts when SearchOnePass cannot be used.
 
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <utility>
+
+#include "util/logging.h"
+#include "util/pod_array.h"
 #include "re2/prog.h"
 #include "re2/regexp.h"
 
@@ -31,7 +38,6 @@
 class BitState {
  public:
   explicit BitState(Prog* prog);
-  ~BitState();
 
   // The usual Search prototype.
   // Can only call Search once per BitState.
@@ -42,7 +48,7 @@
  private:
   inline bool ShouldVisit(int id, const char* p);
   void Push(int id, const char* p, int arg);
-  bool GrowStack();
+  void GrowStack();
   bool TrySearch(int id, const char* p);
 
   // Search parameters
@@ -52,20 +58,15 @@
   bool anchored_;           // whether search is anchored at text.begin()
   bool longest_;            // whether search wants leftmost-longest match
   bool endmatch_;           // whether match must end at text.end()
-  StringPiece *submatch_;   // submatches to fill in
+  StringPiece* submatch_;   // submatches to fill in
   int nsubmatch_;           //   # of submatches to fill in
 
   // Search state
-  const char** cap_;        // capture registers
-  int ncap_;
-
   static const int VisitedBits = 32;
-  uint32 *visited_;         // bitmap: (Inst*, char*) pairs already backtracked
-  int nvisited_;            //   # of words in bitmap
-
-  Job *job_;                // stack of text positions to explore
-  int njob_;
-  int maxjob_;
+  PODArray<uint32_t> visited_;  // bitmap: (Inst*, char*) pairs visited
+  PODArray<const char*> cap_;   // capture registers
+  PODArray<Job> job_;           // stack of text positions to explore
+  int njob_;                    // stack size
 };
 
 BitState::BitState(Prog* prog)
@@ -75,26 +76,15 @@
     endmatch_(false),
     submatch_(NULL),
     nsubmatch_(0),
-    cap_(NULL),
-    ncap_(0),
-    visited_(NULL),
-    nvisited_(0),
-    job_(NULL),
-    njob_(0),
-    maxjob_(0) {
-}
-
-BitState::~BitState() {
-  delete[] visited_;
-  delete[] job_;
-  delete[] cap_;
+    njob_(0) {
 }
 
 // Should the search visit the pair ip, p?
 // If so, remember that it was visited so that the next time,
 // we don't repeat the visit.
 bool BitState::ShouldVisit(int id, const char* p) {
-  uint n = id * (text_.size() + 1) + (p - text_.begin());
+  int n = id * static_cast<int>(text_.size()+1) +
+          static_cast<int>(p-text_.begin());
   if (visited_[n/VisitedBits] & (1 << (n & (VisitedBits-1))))
     return false;
   visited_[n/VisitedBits] |= 1 << (n & (VisitedBits-1));
@@ -102,25 +92,22 @@
 }
 
 // Grow the stack.
-bool BitState::GrowStack() {
-  // VLOG(0) << "Reallocate.";
-  maxjob_ *= 2;
-  Job* newjob = new Job[maxjob_];
-  memmove(newjob, job_, njob_*sizeof job_[0]);
-  delete[] job_;
-  job_ = newjob;
-  if (njob_ >= maxjob_) {
-    LOG(DFATAL) << "Job stack overflow.";
-    return false;
-  }
-  return true;
+void BitState::GrowStack() {
+  PODArray<Job> tmp(2*job_.size());
+  memmove(tmp.data(), job_.data(), njob_*sizeof job_[0]);
+  job_ = std::move(tmp);
 }
 
 // Push the triple (id, p, arg) onto the stack, growing it if necessary.
 void BitState::Push(int id, const char* p, int arg) {
-  if (njob_ >= maxjob_) {
-    if (!GrowStack())
+  if (njob_ >= job_.size()) {
+    GrowStack();
+    if (njob_ >= job_.size()) {
+      LOG(DFATAL) << "GrowStack() failed: "
+                  << "njob_ = " << njob_ << ", "
+                  << "job_.size() = " << job_.size();
       return;
+    }
   }
   int op = prog_->inst(id)->opcode();
   if (op == kInstFail)
@@ -141,6 +128,7 @@
 // Return whether it succeeded.
 bool BitState::TrySearch(int id0, const char* p0) {
   bool matched = false;
+  bool inaltmatch = false;
   const char* end = text_.end();
   njob_ = 0;
   Push(id0, p0, 0);
@@ -159,81 +147,86 @@
     // would have, but we avoid the stack
     // manipulation.
     if (0) {
+    Next:
+      // If the Match of a non-greedy AltMatch failed,
+      // we stop ourselves from trying the ByteRange,
+      // which would steer us off the short circuit.
+      if (prog_->inst(id)->last() || inaltmatch)
+        continue;
+      id++;
+
     CheckAndLoop:
       if (!ShouldVisit(id, p))
         continue;
     }
 
     // Visit ip, p.
-    // VLOG(0) << "Job: " << ip->id() << " "
-    //         << (p - text_.begin()) << " " << arg;
     Prog::Inst* ip = prog_->inst(id);
     switch (ip->opcode()) {
-      case kInstFail:
       default:
         LOG(DFATAL) << "Unexpected opcode: " << ip->opcode() << " arg " << arg;
         return false;
 
-      case kInstAlt:
-        // Cannot just
-        //   Push(ip->out1(), p, 0);
-        //   Push(ip->out(), p, 0);
-        // If, during the processing of ip->out(), we encounter
-        // ip->out1() via another path, we want to process it then.
-        // Pushing it here will inhibit that.  Instead, re-push
-        // ip with arg==1 as a reminder to push ip->out1() later.
+      case kInstFail:
+        continue;
+
+      case kInstAltMatch:
         switch (arg) {
           case 0:
+            inaltmatch = true;
             Push(id, p, 1);  // come back when we're done
+
+            // One opcode is ByteRange; the other leads to Match
+            // (possibly via Nop or Capture).
+            if (ip->greedy(prog_)) {
+              // out1 is the match
+              Push(ip->out1(), p, 0);
+              id = ip->out1();
+              p = end;
+              goto CheckAndLoop;
+            }
+            // out is the match - non-greedy
+            Push(ip->out(), end, 0);
             id = ip->out();
             goto CheckAndLoop;
 
           case 1:
-            // Finished ip->out(); try ip->out1().
-            arg = 0;
-            id = ip->out1();
-            goto CheckAndLoop;
+            inaltmatch = false;
+            continue;
         }
-        LOG(DFATAL) << "Bad arg in kInstCapture: " << arg;
+        LOG(DFATAL) << "Bad arg in kInstAltMatch: " << arg;
         continue;
 
-      case kInstAltMatch:
-        // One opcode is byte range; the other leads to match.
-        if (ip->greedy(prog_)) {
-          // out1 is the match
-          Push(ip->out1(), p, 0);
-          id = ip->out1();
-          p = end;
-          goto CheckAndLoop;
-        }
-        // out is the match - non-greedy
-        Push(ip->out(), end, 0);
-        id = ip->out();
-        goto CheckAndLoop;
-
       case kInstByteRange: {
         int c = -1;
         if (p < end)
           c = *p & 0xFF;
-        if (ip->Matches(c)) {
-          id = ip->out();
-          p++;
-          goto CheckAndLoop;
-        }
-        continue;
+        if (!ip->Matches(c))
+          goto Next;
+
+        if (!ip->last())
+          Push(id+1, p, 0);  // try the next when we're done
+        id = ip->out();
+        p++;
+        goto CheckAndLoop;
       }
 
       case kInstCapture:
         switch (arg) {
           case 0:
-            if (0 <= ip->cap() && ip->cap() < ncap_) {
+            if (!ip->last())
+              Push(id+1, p, 0);  // try the next when we're done
+
+            if (0 <= ip->cap() && ip->cap() < cap_.size()) {
               // Capture p to register, but save old value.
               Push(id, cap_[ip->cap()], 1);  // come back when we're done
               cap_[ip->cap()] = p;
             }
+
             // Continue on.
             id = ip->out();
             goto CheckAndLoop;
+
           case 1:
             // Finished ip->out(); restore the old value.
             cap_[ip->cap()] = p;
@@ -244,19 +237,23 @@
 
       case kInstEmptyWidth:
         if (ip->empty() & ~Prog::EmptyFlags(context_, p))
-          continue;
+          goto Next;
+
+        if (!ip->last())
+          Push(id+1, p, 0);  // try the next when we're done
         id = ip->out();
         goto CheckAndLoop;
 
       case kInstNop:
+        if (!ip->last())
+          Push(id+1, p, 0);  // try the next when we're done
         id = ip->out();
         goto CheckAndLoop;
 
       case kInstMatch: {
         if (endmatch_ && p != text_.end())
-          continue;
+          goto Next;
 
-        // VLOG(0) << "Found match.";
         // We found a match.  If the caller doesn't care
         // where the match is, no point going further.
         if (nsubmatch_ == 0)
@@ -270,7 +267,9 @@
         if (submatch_[0].data() == NULL ||
             (longest_ && p > submatch_[0].end())) {
           for (int i = 0; i < nsubmatch_; i++)
-            submatch_[i] = StringPiece(cap_[2*i], cap_[2*i+1] - cap_[2*i]);
+            submatch_[i] =
+                StringPiece(cap_[2 * i],
+                            static_cast<size_t>(cap_[2 * i + 1] - cap_[2 * i]));
         }
 
         // If going for first match, we're done.
@@ -282,7 +281,7 @@
           return true;
 
         // Otherwise, continue on in hope of a longer match.
-        continue;
+        goto Next;
       }
     }
   }
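Design note on the rewrite above: there is no longer a kInstAlt case because the search now relies on instructions that carry a last() bit, so the alternatives of a flattened instruction list sit next to each other in the program and "try the next alternative" is simply a push of id+1; the Next label likewise steps to id+1 unless last() is set. The flattening itself is done by prog_->Flatten(), which appears in compile.cc further down in this patch. Every non-final case follows the same pattern:

    if (!ip->last())
      Push(id+1, p, 0);  // the next alternative is literally the next instruction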
@@ -308,22 +307,22 @@
   submatch_ = submatch;
   nsubmatch_ = nsubmatch;
   for (int i = 0; i < nsubmatch_; i++)
-    submatch_[i] = NULL;
+    submatch_[i] = StringPiece();
 
   // Allocate scratch space.
-  nvisited_ = (prog_->size() * (text.size()+1) + VisitedBits-1) / VisitedBits;
-  visited_ = new uint32[nvisited_];
-  memset(visited_, 0, nvisited_*sizeof visited_[0]);
-  // VLOG(0) << "nvisited_ = " << nvisited_;
+  int nvisited = prog_->size() * static_cast<int>(text.size()+1);
+  nvisited = (nvisited + VisitedBits-1) / VisitedBits;
+  visited_ = PODArray<uint32_t>(nvisited);
+  memset(visited_.data(), 0, nvisited*sizeof visited_[0]);
 
-  ncap_ = 2*nsubmatch;
-  if (ncap_ < 2)
-    ncap_ = 2;
-  cap_ = new const char*[ncap_];
-  memset(cap_, 0, ncap_*sizeof cap_[0]);
+  int ncap = 2*nsubmatch;
+  if (ncap < 2)
+    ncap = 2;
+  cap_ = PODArray<const char*>(ncap);
+  memset(cap_.data(), 0, ncap*sizeof cap_[0]);
 
-  maxjob_ = 256;
-  job_ = new Job[maxjob_];
+  // When sizeof(Job) == 16, we start with a nice round 4KiB. :)
+  job_ = PODArray<Job>(256);
 
   // Anchored search must start at text.begin().
   if (anchored_) {
@@ -338,6 +337,14 @@
   // but we are not clearing visited_ between calls to TrySearch,
   // so no work is duplicated and it ends up still being linear.
   for (const char* p = text.begin(); p <= text.end(); p++) {
+    // Try to use memchr to find the first byte quickly.
+    int fb = prog_->first_byte();
+    if (fb >= 0 && p < text.end() && (p[0] & 0xFF) != fb) {
+      p = reinterpret_cast<const char*>(memchr(p, fb, text.end() - p));
+      if (p == NULL)
+        p = text.end();
+    }
+
     cap_[0] = p;
     if (TrySearch(prog_->start(), p))  // Match must be leftmost; done.
       return true;
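The loop above uses the program's precomputed first byte to jump ahead with memchr instead of advancing one position at a time. A standalone sketch of the same idea (illustrative, not the patch's code; the helper name is hypothetical):

    #include <string.h>

    // If every match must begin with byte fb, jump to the next occurrence of
    // fb (or to end if there is none) rather than stepping byte by byte.
    static const char* SkipToFirstByte(const char* p, const char* end, int fb) {
      if (fb < 0 || p >= end || (*p & 0xFF) == fb)
        return p;
      p = reinterpret_cast<const char*>(memchr(p, fb, end - p));
      return p == NULL ? end : p;
    }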
diff --git a/re2/compile.cc b/re2/compile.cc
index 9cddb71..3f8e0cc 100644
--- a/re2/compile.cc
+++ b/re2/compile.cc
@@ -8,6 +8,14 @@
 // This file's external interface is just Regexp::CompileToProg.
 // The Compiler class defined in this file is private.
 
+#include <stdint.h>
+#include <string.h>
+#include <unordered_map>
+#include <utility>
+
+#include "util/logging.h"
+#include "util/pod_array.h"
+#include "util/utf.h"
 #include "re2/prog.h"
 #include "re2/re2.h"
 #include "re2/regexp.h"
@@ -28,14 +36,14 @@
 // is always the fail instruction, which never appears on a list.
 
 struct PatchList {
-  uint32 p;
+  uint32_t p;
 
   // Returns patch list containing just p.
-  static PatchList Mk(uint32 p);
+  static PatchList Mk(uint32_t p);
 
   // Patches all the entries on l to have value v.
   // Caller must not ever use patch list again.
-  static void Patch(Prog::Inst *inst0, PatchList l, uint32 v);
+  static void Patch(Prog::Inst *inst0, PatchList l, uint32_t v);
 
   // Deref returns the next pointer pointed at by p.
   static PatchList Deref(Prog::Inst *inst0, PatchList l);
@@ -44,10 +52,10 @@
   static PatchList Append(Prog::Inst *inst0, PatchList l1, PatchList l2);
 };
 
-static PatchList nullPatchList;
+static PatchList nullPatchList = { 0 };
 
 // Returns patch list containing just p.
-PatchList PatchList::Mk(uint32 p) {
+PatchList PatchList::Mk(uint32_t p) {
   PatchList l;
   l.p = p;
   return l;
@@ -64,7 +72,7 @@
 }
 
 // Patches all the entries on l to have value v.
-void PatchList::Patch(Prog::Inst *inst0, PatchList l, uint32 val) {
+void PatchList::Patch(Prog::Inst *inst0, PatchList l, uint32_t val) {
   while (l.p != 0) {
     Prog::Inst* ip = &inst0[l.p>>1];
     if (l.p&1) {
@@ -103,20 +111,17 @@
 
 // Compiled program fragment.
 struct Frag {
-  uint32 begin;
+  uint32_t begin;
   PatchList end;
 
-  explicit Frag(LinkerInitialized) {}
   Frag() : begin(0) { end.p = 0; }  // needed so Frag can go in vector
-  Frag(uint32 begin, PatchList end) : begin(begin), end(end) {}
+  Frag(uint32_t begin, PatchList end) : begin(begin), end(end) {}
 };
 
-static Frag kNullFrag(LINKER_INITIALIZED);
-
 // Input encodings.
 enum Encoding {
   kEncodingUTF8 = 1,  // UTF-8 (0-10FFFF)
-  kEncodingLatin1,    // Latin1 (0-FF)
+  kEncodingLatin1,    // Latin-1 (0-FF)
 };
 
 class Compiler : public Regexp::Walker<Frag> {
@@ -128,12 +133,11 @@
   // Caller is responsible for deleting Prog when finished with it.
   // If reversed is true, compiles for walking over the input
   // string backward (reverses all concatenations).
-  static Prog *Compile(Regexp* re, bool reversed, int64 max_mem);
+  static Prog *Compile(Regexp* re, bool reversed, int64_t max_mem);
 
   // Compiles alternation of all the re to a new Prog.
   // Each re has a match with an id equal to its index in the vector.
-  static Prog* CompileSet(const RE2::Options& options, RE2::Anchor anchor,
-                          Regexp* re);
+  static Prog* CompileSet(Regexp* re, RE2::Anchor anchor, int64_t max_mem);
 
   // Interface for Regexp::Walker, which helps traverse the Regexp.
   // The walk is purely post-recursive: given the machines for the
@@ -165,7 +169,7 @@
   Frag NoMatch();
 
   // Returns a fragment that matches the empty string.
-  Frag Match(int32 id);
+  Frag Match(int32_t id);
 
   // Returns a no-op fragment.
   Frag Nop();
@@ -181,9 +185,6 @@
   // Returns -1 if no more instructions are available.
   int AllocInst(int n);
 
-  // Deletes unused instructions.
-  void Trim();
-
   // Rune range compiler.
 
   // Begins a new alternation.
@@ -196,19 +197,35 @@
   void Add_80_10ffff();
 
   // New suffix that matches the byte range lo-hi, then goes to next.
-  int RuneByteSuffix(uint8 lo, uint8 hi, bool foldcase, int next);
-  int UncachedRuneByteSuffix(uint8 lo, uint8 hi, bool foldcase, int next);
+  int UncachedRuneByteSuffix(uint8_t lo, uint8_t hi, bool foldcase, int next);
+  int CachedRuneByteSuffix(uint8_t lo, uint8_t hi, bool foldcase, int next);
+
+  // Returns true iff the suffix is cached.
+  bool IsCachedRuneByteSuffix(int id);
 
   // Adds a suffix to alternation.
   void AddSuffix(int id);
 
+  // Adds a suffix to the trie starting from the given root node.
+  // Returns zero iff allocating an instruction fails. Otherwise, returns
+  // the current root node, which might be different from what was given.
+  int AddSuffixRecursive(int root, int id);
+
+  // Finds the trie node for the given suffix. Returns a Frag in order to
+  // distinguish between pointing at the root node directly (end.p == 0)
+  // and pointing at an Alt's out1 or out (end.p&1 == 1 or 0, respectively).
+  Frag FindByteRange(int root, int id);
+
+  // Compares two ByteRanges and returns true iff they are equal.
+  bool ByteRangeEqual(int id1, int id2);
+
   // Returns the alternation of all the added suffixes.
   Frag EndRange();
 
   // Single rune.
   Frag Literal(Rune r, bool foldcase);
 
-  void Setup(Regexp::ParseFlags, int64, RE2::Anchor);
+  void Setup(Regexp::ParseFlags, int64_t, RE2::Anchor);
   Prog* Finish();
 
   // Returns .* where dot = any byte
@@ -220,20 +237,19 @@
   Encoding encoding_;  // Input encoding
   bool reversed_;      // Should program run backward over text?
 
-  int max_inst_;       // Maximum number of instructions.
+  PODArray<Prog::Inst> inst_;
+  int ninst_;          // Number of instructions used.
+  int max_ninst_;      // Maximum number of instructions.
 
-  Prog::Inst* inst_;   // Pointer to first instruction.
-  int inst_len_;       // Number of instructions used.
-  int inst_cap_;       // Number of instructions allocated.
+  int64_t max_mem_;    // Total memory budget.
 
-  int64 max_mem_;      // Total memory budget.
-
-  map<uint64, int> rune_cache_;
+  std::unordered_map<uint64_t, int> rune_cache_;
   Frag rune_range_;
 
   RE2::Anchor anchor_;  // anchor mode for RE2::Set
 
-  DISALLOW_EVIL_CONSTRUCTORS(Compiler);
+  Compiler(const Compiler&) = delete;
+  Compiler& operator=(const Compiler&) = delete;
 };
 
 Compiler::Compiler() {
@@ -241,53 +257,41 @@
   failed_ = false;
   encoding_ = kEncodingUTF8;
   reversed_ = false;
-  inst_ = NULL;
-  inst_len_ = 0;
-  inst_cap_ = 0;
-  max_inst_ = 1;  // make AllocInst for fail instruction okay
+  ninst_ = 0;
+  max_ninst_ = 1;  // make AllocInst for fail instruction okay
   max_mem_ = 0;
   int fail = AllocInst(1);
   inst_[fail].InitFail();
-  max_inst_ = 0;  // Caller must change
+  max_ninst_ = 0;  // Caller must change
 }
 
 Compiler::~Compiler() {
   delete prog_;
-  delete[] inst_;
 }
 
 int Compiler::AllocInst(int n) {
-  if (failed_ || inst_len_ + n > max_inst_) {
+  if (failed_ || ninst_ + n > max_ninst_) {
     failed_ = true;
     return -1;
   }
 
-  if (inst_len_ + n > inst_cap_) {
-    if (inst_cap_ == 0)
-      inst_cap_ = 8;
-    while (inst_len_ + n > inst_cap_)
-      inst_cap_ *= 2;
-    Prog::Inst* ip = new Prog::Inst[inst_cap_];
-    memmove(ip, inst_, inst_len_ * sizeof ip[0]);
-    memset(ip + inst_len_, 0, (inst_cap_ - inst_len_) * sizeof ip[0]);
-    delete[] inst_;
-    inst_ = ip;
+  if (ninst_ + n > inst_.size()) {
+    int cap = inst_.size();
+    if (cap == 0)
+      cap = 8;
+    while (ninst_ + n > cap)
+      cap *= 2;
+    PODArray<Prog::Inst> inst(cap);
+    if (inst_.data() != NULL)
+      memmove(inst.data(), inst_.data(), ninst_*sizeof inst_[0]);
+    memset(inst.data() + ninst_, 0, (cap - ninst_)*sizeof inst_[0]);
+    inst_ = std::move(inst);
   }
-  int id = inst_len_;
-  inst_len_ += n;
+  int id = ninst_;
+  ninst_ += n;
   return id;
 }
 
-void Compiler::Trim() {
-  if (inst_len_ < inst_cap_) {
-    Prog::Inst* ip = new Prog::Inst[inst_len_];
-    memmove(ip, inst_, inst_len_ * sizeof ip[0]);
-    delete[] inst_;
-    inst_ = ip;
-    inst_cap_ = inst_len_;
-  }
-}
-
 // These routines are somewhat hard to visualize in text --
 // see http://swtch.com/~rsc/regexp/regexp1.html for
 // pictures explaining what is going on here.
@@ -312,17 +316,18 @@
   if (begin->opcode() == kInstNop &&
       a.end.p == (a.begin << 1) &&
       begin->out() == 0) {
-    PatchList::Patch(inst_, a.end, b.begin);  // in case refs to a somewhere
+    // in case refs to a somewhere
+    PatchList::Patch(inst_.data(), a.end, b.begin);
     return b;
   }
 
   // To run backward over string, reverse all concatenations.
   if (reversed_) {
-    PatchList::Patch(inst_, b.end, a.begin);
+    PatchList::Patch(inst_.data(), b.end, a.begin);
     return Frag(b.begin, a.end);
   }
 
-  PatchList::Patch(inst_, a.end, b.begin);
+  PatchList::Patch(inst_.data(), a.end, b.begin);
   return Frag(a.begin, b.end);
 }
 
@@ -339,7 +344,7 @@
     return NoMatch();
 
   inst_[id].InitAlt(a.begin, b.begin);
-  return Frag(id, PatchList::Append(inst_, a.end, b.end));
+  return Frag(id, PatchList::Append(inst_.data(), a.end, b.end));
 }
 
 // When capturing submatches in like-Perl mode, a kOpAlt Inst
@@ -355,7 +360,7 @@
   if (id < 0)
     return NoMatch();
   inst_[id].InitAlt(0, 0);
-  PatchList::Patch(inst_, a.end, id);
+  PatchList::Patch(inst_.data(), a.end, id);
   if (nongreedy) {
     inst_[id].out1_ = a.begin;
     return Frag(id, PatchList::Mk(id << 1));
@@ -374,6 +379,8 @@
 
 // Given a fragment for a, returns a fragment for a? or a?? (if nongreedy)
 Frag Compiler::Quest(Frag a, bool nongreedy) {
+  if (IsNoMatch(a))
+    return Nop();
   int id = AllocInst(1);
   if (id < 0)
     return NoMatch();
@@ -385,7 +392,7 @@
     inst_[id].InitAlt(a.begin, 0);
     pl = PatchList::Mk((id << 1) | 1);
   }
-  return Frag(id, PatchList::Append(inst_, pl, a.end));
+  return Frag(id, PatchList::Append(inst_.data(), pl, a.end));
 }
 
 // Returns a fragment for the byte range lo-hi.
@@ -394,16 +401,6 @@
   if (id < 0)
     return NoMatch();
   inst_[id].InitByteRange(lo, hi, foldcase, 0);
-  prog_->byte_inst_count_++;
-  prog_->MarkByteRange(lo, hi);
-  if (foldcase && lo <= 'z' && hi >= 'a') {
-    if (lo < 'a')
-      lo = 'a';
-    if (hi > 'z')
-      hi = 'z';
-    if (lo <= hi)
-      prog_->MarkByteRange(lo + 'A' - 'a', hi + 'A' - 'a');
-  }
   return Frag(id, PatchList::Mk(id << 1));
 }
 
@@ -417,7 +414,7 @@
 }
 
 // Returns a fragment that signals a match.
-Frag Compiler::Match(int32 match_id) {
+Frag Compiler::Match(int32_t match_id) {
   int id = AllocInst(1);
   if (id < 0)
     return NoMatch();
@@ -431,27 +428,19 @@
   if (id < 0)
     return NoMatch();
   inst_[id].InitEmptyWidth(empty, 0);
-  if (empty & (kEmptyBeginLine|kEmptyEndLine))
-    prog_->MarkByteRange('\n', '\n');
-  if (empty & (kEmptyWordBoundary|kEmptyNonWordBoundary)) {
-    int j;
-    for (int i = 0; i < 256; i = j) {
-      for (j = i+1; j < 256 && Prog::IsWordChar(i) == Prog::IsWordChar(j); j++)
-        ;
-      prog_->MarkByteRange(i, j-1);
-    }
-  }
   return Frag(id, PatchList::Mk(id << 1));
 }
 
 // Given a fragment a, returns a fragment with capturing parens around a.
 Frag Compiler::Capture(Frag a, int n) {
+  if (IsNoMatch(a))
+    return NoMatch();
   int id = AllocInst(2);
   if (id < 0)
     return NoMatch();
   inst_[id].InitCapture(2*n, a.begin);
   inst_[id+1].InitCapture(2*n+1, 0);
-  PatchList::Patch(inst_, a.end, id+1);
+  PatchList::Patch(inst_.data(), a.end, id+1);
 
   return Frag(id, PatchList::Mk((id+1) << 1));
 }
@@ -481,29 +470,29 @@
   rune_range_.end = nullPatchList;
 }
 
-int Compiler::UncachedRuneByteSuffix(uint8 lo, uint8 hi, bool foldcase,
+int Compiler::UncachedRuneByteSuffix(uint8_t lo, uint8_t hi, bool foldcase,
                                      int next) {
   Frag f = ByteRange(lo, hi, foldcase);
   if (next != 0) {
-    PatchList::Patch(inst_, f.end, next);
+    PatchList::Patch(inst_.data(), f.end, next);
   } else {
-    rune_range_.end = PatchList::Append(inst_, rune_range_.end, f.end);
+    rune_range_.end = PatchList::Append(inst_.data(), rune_range_.end, f.end);
   }
   return f.begin;
 }
 
-int Compiler::RuneByteSuffix(uint8 lo, uint8 hi, bool foldcase, int next) {
-  // In Latin1 mode, there's no point in caching.
-  // In forward UTF-8 mode, only need to cache continuation bytes.
-  if (encoding_ == kEncodingLatin1 ||
-      (encoding_ == kEncodingUTF8 &&
-       !reversed_ &&
-       !(0x80 <= lo && hi <= 0xbf))) {
-    return UncachedRuneByteSuffix(lo, hi, foldcase, next);
-  }
+static uint64_t MakeRuneCacheKey(uint8_t lo, uint8_t hi, bool foldcase,
+                                 int next) {
+  return (uint64_t)next << 17 |
+         (uint64_t)lo   <<  9 |
+         (uint64_t)hi   <<  1 |
+         (uint64_t)foldcase;
+}
 
-  uint64 key = ((uint64)next << 17) | (lo<<9) | (hi<<1) | foldcase;
-  map<uint64, int>::iterator it = rune_cache_.find(key);
+int Compiler::CachedRuneByteSuffix(uint8_t lo, uint8_t hi, bool foldcase,
+                                   int next) {
+  uint64_t key = MakeRuneCacheKey(lo, hi, foldcase, next);
+  std::unordered_map<uint64_t, int>::const_iterator it = rune_cache_.find(key);
   if (it != rune_cache_.end())
     return it->second;
   int id = UncachedRuneByteSuffix(lo, hi, foldcase, next);
@@ -511,12 +500,31 @@
   return id;
 }
 
+bool Compiler::IsCachedRuneByteSuffix(int id) {
+  uint8_t lo = inst_[id].lo_;
+  uint8_t hi = inst_[id].hi_;
+  bool foldcase = inst_[id].foldcase() != 0;
+  int next = inst_[id].out();
+
+  uint64_t key = MakeRuneCacheKey(lo, hi, foldcase, next);
+  return rune_cache_.find(key) != rune_cache_.end();
+}
+
 void Compiler::AddSuffix(int id) {
+  if (failed_)
+    return;
+
   if (rune_range_.begin == 0) {
     rune_range_.begin = id;
     return;
   }
 
+  if (encoding_ == kEncodingUTF8) {
+    // Build a trie in order to reduce fanout.
+    rune_range_.begin = AddSuffixRecursive(rune_range_.begin, id);
+    return;
+  }
+
   int alt = AllocInst(1);
   if (alt < 0) {
     rune_range_.begin = 0;
@@ -526,6 +534,102 @@
   rune_range_.begin = alt;
 }
 
+int Compiler::AddSuffixRecursive(int root, int id) {
+  DCHECK(inst_[root].opcode() == kInstAlt ||
+         inst_[root].opcode() == kInstByteRange);
+
+  Frag f = FindByteRange(root, id);
+  if (IsNoMatch(f)) {
+    int alt = AllocInst(1);
+    if (alt < 0)
+      return 0;
+    inst_[alt].InitAlt(root, id);
+    return alt;
+  }
+
+  int br;
+  if (f.end.p == 0)
+    br = root;
+  else if (f.end.p&1)
+    br = inst_[f.begin].out1();
+  else
+    br = inst_[f.begin].out();
+
+  if (IsCachedRuneByteSuffix(br)) {
+    // We can't fiddle with cached suffixes, so make a clone of the head.
+    int byterange = AllocInst(1);
+    if (byterange < 0)
+      return 0;
+    inst_[byterange].InitByteRange(inst_[br].lo(), inst_[br].hi(),
+                                   inst_[br].foldcase(), inst_[br].out());
+
+    // Ensure that the parent points to the clone, not to the original.
+    // Note that this could leave the head unreachable except via the cache.
+    br = byterange;
+    if (f.end.p == 0)
+      root = br;
+    else if (f.end.p&1)
+      inst_[f.begin].out1_ = br;
+    else
+      inst_[f.begin].set_out(br);
+  }
+
+  int out = inst_[id].out();
+  if (!IsCachedRuneByteSuffix(id)) {
+    // The head should be the instruction most recently allocated, so free it
+    // instead of leaving it unreachable.
+    DCHECK_EQ(id, ninst_-1);
+    inst_[id].out_opcode_ = 0;
+    inst_[id].out1_ = 0;
+    ninst_--;
+  }
+
+  out = AddSuffixRecursive(inst_[br].out(), out);
+  if (out == 0)
+    return 0;
+
+  inst_[br].set_out(out);
+  return root;
+}
+
+bool Compiler::ByteRangeEqual(int id1, int id2) {
+  return inst_[id1].lo() == inst_[id2].lo() &&
+         inst_[id1].hi() == inst_[id2].hi() &&
+         inst_[id1].foldcase() == inst_[id2].foldcase();
+}
+
+Frag Compiler::FindByteRange(int root, int id) {
+  if (inst_[root].opcode() == kInstByteRange) {
+    if (ByteRangeEqual(root, id))
+      return Frag(root, nullPatchList);
+    else
+      return NoMatch();
+  }
+
+  while (inst_[root].opcode() == kInstAlt) {
+    int out1 = inst_[root].out1();
+    if (ByteRangeEqual(out1, id))
+      return Frag(root, PatchList::Mk((root << 1) | 1));
+
+    // CharClass is a sorted list of ranges, so if out1 of the root Alt wasn't
+    // what we're looking for, then we can stop immediately. Unfortunately, we
+    // can't short-circuit the search in reverse mode.
+    if (!reversed_)
+      return NoMatch();
+
+    int out = inst_[root].out();
+    if (inst_[out].opcode() == kInstAlt)
+      root = out;
+    else if (ByteRangeEqual(out, id))
+      return Frag(root, PatchList::Mk(root << 1));
+    else
+      return NoMatch();
+  }
+
+  LOG(DFATAL) << "should never happen";
+  return NoMatch();
+}
+
 Frag Compiler::EndRange() {
   return rune_range_;
 }
@@ -549,12 +653,13 @@
 }
 
 void Compiler::AddRuneRangeLatin1(Rune lo, Rune hi, bool foldcase) {
-  // Latin1 is easy: runes *are* bytes.
+  // Latin-1 is easy: runes *are* bytes.
   if (lo > hi || lo > 0xFF)
     return;
   if (hi > 0xFF)
     hi = 0xFF;
-  AddSuffix(RuneByteSuffix(lo, hi, foldcase, 0));
+  AddSuffix(UncachedRuneByteSuffix(static_cast<uint8_t>(lo),
+                                   static_cast<uint8_t>(hi), foldcase, 0));
 }
 
 // Table describing how to make a UTF-8 matching machine
@@ -595,7 +700,8 @@
     int next = 0;
     if (p.next >= 0)
       next = inst[p.next];
-    inst[i] = UncachedRuneByteSuffix(p.lo, p.hi, false, next);
+    inst[i] = UncachedRuneByteSuffix(static_cast<uint8_t>(p.lo),
+                                     static_cast<uint8_t>(p.hi), false, next);
     if ((p.lo & 0xC0) != 0x80)
       AddSuffix(inst[i]);
   }
@@ -624,13 +730,14 @@
 
   // ASCII range is always a special case.
   if (hi < Runeself) {
-    AddSuffix(RuneByteSuffix(lo, hi, foldcase, 0));
+    AddSuffix(UncachedRuneByteSuffix(static_cast<uint8_t>(lo),
+                                     static_cast<uint8_t>(hi), foldcase, 0));
     return;
   }
 
   // Split range into sections that agree on leading bytes.
   for (int i = 1; i < UTFmax; i++) {
-    uint m = (1<<(6*i)) - 1;  // last i bytes of a UTF-8 sequence
+    uint32_t m = (1<<(6*i)) - 1;  // last i bytes of a UTF-8 sequence
     if ((lo & ~m) != (hi & ~m)) {
       if ((lo & m) != 0) {
         AddRuneRangeUTF8(lo, lo|m, foldcase);
@@ -646,19 +753,55 @@
   }
 
   // Finally.  Generate byte matching equivalent for lo-hi.
-  uint8 ulo[UTFmax], uhi[UTFmax];
+  uint8_t ulo[UTFmax], uhi[UTFmax];
   int n = runetochar(reinterpret_cast<char*>(ulo), &lo);
   int m = runetochar(reinterpret_cast<char*>(uhi), &hi);
   (void)m;  // USED(m)
   DCHECK_EQ(n, m);
 
+  // The logic below encodes this thinking:
+  //
+  // 1. When we have built the whole suffix, we know that it cannot
+  // possibly be a suffix of anything longer: in forward mode, nothing
+  // else can occur before the leading byte; in reverse mode, nothing
+  // else can occur after the last continuation byte or else the leading
+  // byte would have to change. Thus, there is no benefit to caching
+  // the first byte of the suffix whereas there is a cost involved in
+  // cloning it if it begins a common prefix, which is fairly likely.
+  //
+  // 2. Conversely, the last byte of the suffix cannot possibly be a
+  // prefix of anything because next == 0, so we will never want to
+  // clone it, but it is fairly likely to be a common suffix. Perhaps
+  // more so in reverse mode than in forward mode because the former is
+  // "converging" towards lower entropy, but caching is still worthwhile
+  // for the latter in cases such as 80-BF.
+  //
+  // 3. Handling the bytes between the first and the last is less
+  // straightforward and, again, the approach depends on whether we are
+  // "converging" towards lower entropy: in forward mode, a single byte
+  // is unlikely to be part of a common suffix whereas a byte range
+  // is more likely so; in reverse mode, a byte range is unlikely to
+  // be part of a common suffix whereas a single byte is more likely
+  // so. The same benefit versus cost argument applies here.
   int id = 0;
   if (reversed_) {
-    for (int i = 0; i < n; i++)
-      id = RuneByteSuffix(ulo[i], uhi[i], false, id);
+    for (int i = 0; i < n; i++) {
+      // In reverse UTF-8 mode: cache the leading byte; don't cache the last
+      // continuation byte; cache anything else iff it's a single byte (XX-XX).
+      if (i == 0 || (ulo[i] == uhi[i] && i != n-1))
+        id = CachedRuneByteSuffix(ulo[i], uhi[i], false, id);
+      else
+        id = UncachedRuneByteSuffix(ulo[i], uhi[i], false, id);
+    }
   } else {
-    for (int i = n-1; i >= 0; i--)
-      id = RuneByteSuffix(ulo[i], uhi[i], false, id);
+    for (int i = n-1; i >= 0; i--) {
+      // In forward UTF-8 mode: don't cache the leading byte; cache the last
+      // continuation byte; cache anything else iff it's a byte range (XX-YY).
+      if (i == n-1 || (ulo[i] < uhi[i] && i != 0))
+        id = CachedRuneByteSuffix(ulo[i], uhi[i], false, id);
+      else
+        id = UncachedRuneByteSuffix(ulo[i], uhi[i], false, id);
+    }
   }
   AddSuffix(id);
 }
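Worked example of the caching policy described in the comment above (illustrative): the range U+0080..U+07FF encodes to the two-byte UTF-8 sequences C2..DF 80..BF, so in forward mode the loop emits

    id = CachedRuneByteSuffix(0x80, 0xBF, false, 0);     // last continuation byte: cache it
    id = UncachedRuneByteSuffix(0xC2, 0xDF, false, id);  // leading byte: don't cache it
    AddSuffix(id);

which lets other suffixes that also end in 80-BF share the cached continuation-byte instruction.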
@@ -684,13 +827,13 @@
   if (failed_)
     *stop = true;
 
-  return kNullFrag;  // not used by caller
+  return Frag();  // not used by caller
 }
 
 Frag Compiler::Literal(Rune r, bool foldcase) {
   switch (encoding_) {
     default:
-      return kNullFrag;
+      return Frag();
 
     case kEncodingLatin1:
       return ByteRange(r, r, foldcase);
@@ -698,11 +841,11 @@
     case kEncodingUTF8: {
       if (r < Runeself)  // Make common case fast.
         return ByteRange(r, r, foldcase);
-      uint8 buf[UTFmax];
+      uint8_t buf[UTFmax];
       int n = runetochar(reinterpret_cast<char*>(buf), &r);
-      Frag f = ByteRange((uint8)buf[0], buf[0], false);
+      Frag f = ByteRange((uint8_t)buf[0], buf[0], false);
       for (int i = 1; i < n; i++)
-        f = Cat(f, ByteRange((uint8)buf[i], buf[i], false));
+        f = Cat(f, ByteRange((uint8_t)buf[i], buf[i], false));
       return f;
     }
   }
@@ -731,9 +874,11 @@
 
     case kRegexpHaveMatch: {
       Frag f = Match(re->match_id());
-      // Remember unanchored match to end of string.
-      if (anchor_ != RE2::ANCHOR_BOTH)
-        f = Cat(DotStar(), Cat(EmptyWidth(kEmptyEndText), f));
+      if (anchor_ == RE2::ANCHOR_BOTH) {
+        // Append \z or else the subexpression will effectively be unanchored.
+        // Complemented by the UNANCHORED case in CompileSet().
+        f = Cat(EmptyWidth(kEmptyEndText), f);
+      }
       return f;
     }
 
@@ -752,16 +897,16 @@
     }
 
     case kRegexpStar:
-      return Star(child_frags[0], re->parse_flags()&Regexp::NonGreedy);
+      return Star(child_frags[0], (re->parse_flags()&Regexp::NonGreedy) != 0);
 
     case kRegexpPlus:
-      return Plus(child_frags[0], re->parse_flags()&Regexp::NonGreedy);
+      return Plus(child_frags[0], (re->parse_flags()&Regexp::NonGreedy) != 0);
 
     case kRegexpQuest:
-      return Quest(child_frags[0], re->parse_flags()&Regexp::NonGreedy);
+      return Quest(child_frags[0], (re->parse_flags()&Regexp::NonGreedy) != 0);
 
     case kRegexpLiteral:
-      return Literal(re->rune(), re->parse_flags()&Regexp::FoldCase);
+      return Literal(re->rune(), (re->parse_flags()&Regexp::FoldCase) != 0);
 
     case kRegexpLiteralString: {
       // Concatenation of literals.
@@ -769,7 +914,8 @@
         return Nop();
       Frag f;
       for (int i = 0; i < re->nrunes(); i++) {
-        Frag f1 = Literal(re->runes()[i], re->parse_flags()&Regexp::FoldCase);
+        Frag f1 = Literal(re->runes()[i],
+                          (re->parse_flags()&Regexp::FoldCase) != 0);
         if (i == 0)
           f = f1;
         else
@@ -814,7 +960,8 @@
         // If this range contains all of A-Za-z or none of it,
         // the fold flag is unnecessary; don't bother.
         bool fold = foldascii;
-        if ((i->lo <= 'A' && 'z' <= i->hi) || i->hi < 'A' || 'z' < i->lo)
+        if ((i->lo <= 'A' && 'z' <= i->hi) || i->hi < 'A' || 'z' < i->lo ||
+            ('Z' < i->lo && i->hi < 'a'))
           fold = false;
 
         AddRuneRange(i->lo, i->hi, fold);
@@ -871,12 +1018,11 @@
       if (re->nsub() > 0) {
         sub = re->sub()[0]->Incref();
         if (IsAnchorStart(&sub, depth+1)) {
-          Regexp** subcopy = new Regexp*[re->nsub()];
+          PODArray<Regexp*> subcopy(re->nsub());
           subcopy[0] = sub;  // already have reference
           for (int i = 1; i < re->nsub(); i++)
             subcopy[i] = re->sub()[i]->Incref();
-          *pre = Regexp::Concat(subcopy, re->nsub(), re->parse_flags());
-          delete[] subcopy;
+          *pre = Regexp::Concat(subcopy.data(), re->nsub(), re->parse_flags());
           re->Decref();
           return true;
         }
@@ -919,12 +1065,11 @@
       if (re->nsub() > 0) {
         sub = re->sub()[re->nsub() - 1]->Incref();
         if (IsAnchorEnd(&sub, depth+1)) {
-          Regexp** subcopy = new Regexp*[re->nsub()];
+          PODArray<Regexp*> subcopy(re->nsub());
           subcopy[re->nsub() - 1] = sub;  // already have reference
           for (int i = 0; i < re->nsub() - 1; i++)
             subcopy[i] = re->sub()[i]->Incref();
-          *pre = Regexp::Concat(subcopy, re->nsub(), re->parse_flags());
-          delete[] subcopy;
+          *pre = Regexp::Concat(subcopy.data(), re->nsub(), re->parse_flags());
           re->Decref();
           return true;
         }
@@ -948,7 +1093,7 @@
   return false;
 }
 
-void Compiler::Setup(Regexp::ParseFlags flags, int64 max_mem,
+void Compiler::Setup(Regexp::ParseFlags flags, int64_t max_mem,
                      RE2::Anchor anchor) {
   prog_->set_flags(flags);
 
@@ -956,15 +1101,15 @@
     encoding_ = kEncodingLatin1;
   max_mem_ = max_mem;
   if (max_mem <= 0) {
-    max_inst_ = 100000;  // more than enough
-  } else if (max_mem <= sizeof(Prog)) {
+    max_ninst_ = 100000;  // more than enough
+  } else if (static_cast<size_t>(max_mem) <= sizeof(Prog)) {
     // No room for anything.
-    max_inst_ = 0;
+    max_ninst_ = 0;
   } else {
-    int64 m = (max_mem - sizeof(Prog)) / sizeof(Prog::Inst);
+    int64_t m = (max_mem - sizeof(Prog)) / sizeof(Prog::Inst);
     // Limit instruction count so that inst->id() fits nicely in an int.
     // SparseArray also assumes that the indices (inst->id()) are ints.
-    // The call to WalkExponential uses 2*max_inst_ below,
+    // The call to WalkExponential uses 2*max_ninst_ below,
     // and other places in the code use 2 or 3 * prog->size().
     // Limiting to 2^24 should avoid overflow in those places.
     // (The point of allowing more than 32 bits of memory is to
@@ -977,7 +1122,7 @@
     if (m > Prog::Inst::kMaxInst)
       m = Prog::Inst::kMaxInst;
 
-    max_inst_ = m;
+    max_ninst_ = static_cast<int>(m);
   }
 
   anchor_ = anchor;
@@ -988,10 +1133,9 @@
 // If reversed is true, compiles a program that expects
 // to run over the input string backward (reverses all concatenations).
 // The reversed flag is also recorded in the returned program.
-Prog* Compiler::Compile(Regexp* re, bool reversed, int64 max_mem) {
+Prog* Compiler::Compile(Regexp* re, bool reversed, int64_t max_mem) {
   Compiler c;
-
-  c.Setup(re->parse_flags(), max_mem, RE2::ANCHOR_BOTH /* unused */);
+  c.Setup(re->parse_flags(), max_mem, RE2::UNANCHORED /* unused */);
   c.reversed_ = reversed;
 
   // Simplify to remove things like counted repetitions
@@ -1006,7 +1150,7 @@
   bool is_anchor_end = IsAnchorEnd(&sre, 0);
 
   // Generate fragment for entire regexp.
-  Frag f = c.WalkExponential(sre, kNullFrag, 2*c.max_inst_);
+  Frag all = c.WalkExponential(sre, Frag(), 2*c.max_ninst_);
   sre->Decref();
   if (c.failed_)
     return NULL;
@@ -1015,10 +1159,10 @@
   // Turn off c.reversed_ (if it is set) to force the remaining concatenations
   // to behave normally.
   c.reversed_ = false;
-  Frag all = c.Cat(f, c.Match(0));
-  c.prog_->set_start(all.begin);
+  all = c.Cat(all, c.Match(0));
 
-  if (reversed) {
+  c.prog_->set_reversed(reversed);
+  if (c.prog_->reversed()) {
     c.prog_->set_anchor_start(is_anchor_end);
     c.prog_->set_anchor_end(is_anchor_start);
   } else {
@@ -1026,15 +1170,12 @@
     c.prog_->set_anchor_end(is_anchor_end);
   }
 
-  // Also create unanchored version, which starts with a .*? loop.
-  if (c.prog_->anchor_start()) {
-    c.prog_->set_start_unanchored(c.prog_->start());
-  } else {
-    Frag unanchored = c.Cat(c.DotStar(), all);
-    c.prog_->set_start_unanchored(unanchored.begin);
+  c.prog_->set_start(all.begin);
+  if (!c.prog_->anchor_start()) {
+    // Also create unanchored version, which starts with a .*? loop.
+    all = c.Cat(c.DotStar(), all);
   }
-
-  c.prog_->set_reversed(reversed);
+  c.prog_->set_start_unanchored(all.begin);
 
   // Hand ownership of prog_ to caller.
   return c.Finish();
@@ -1046,25 +1187,22 @@
 
   if (prog_->start() == 0 && prog_->start_unanchored() == 0) {
     // No possible matches; keep Fail instruction only.
-    inst_len_ = 1;
+    ninst_ = 1;
   }
 
-  // Trim instruction to minimum array and transfer to Prog.
-  Trim();
-  prog_->inst_ = inst_;
-  prog_->size_ = inst_len_;
-  inst_ = NULL;
-
-  // Compute byte map.
-  prog_->ComputeByteMap();
+  // Hand off the array to Prog.
+  prog_->inst_ = std::move(inst_);
+  prog_->size_ = ninst_;
 
   prog_->Optimize();
+  prog_->Flatten();
+  prog_->ComputeByteMap();
 
   // Record remaining memory for DFA.
   if (max_mem_ <= 0) {
     prog_->set_dfa_mem(1<<20);
   } else {
-    int64 m = max_mem_ - sizeof(Prog) - inst_len_*sizeof(Prog::Inst);
+    int64_t m = max_mem_ - sizeof(Prog) - prog_->size_*sizeof(Prog::Inst);
     if (m < 0)
       m = 0;
     prog_->set_dfa_mem(m);
@@ -1076,11 +1214,11 @@
 }
 
 // Converts Regexp to Prog.
-Prog* Regexp::CompileToProg(int64 max_mem) {
+Prog* Regexp::CompileToProg(int64_t max_mem) {
   return Compiler::Compile(this, false, max_mem);
 }
 
-Prog* Regexp::CompileToReverseProg(int64 max_mem) {
+Prog* Regexp::CompileToReverseProg(int64_t max_mem) {
   return Compiler::Compile(this, true, max_mem);
 }
 
@@ -1089,41 +1227,41 @@
 }
 
 // Compiles RE set to Prog.
-Prog* Compiler::CompileSet(const RE2::Options& options, RE2::Anchor anchor,
-                           Regexp* re) {
+Prog* Compiler::CompileSet(Regexp* re, RE2::Anchor anchor, int64_t max_mem) {
   Compiler c;
+  c.Setup(re->parse_flags(), max_mem, anchor);
 
-  Regexp::ParseFlags pf = static_cast<Regexp::ParseFlags>(options.ParseFlags());
-  c.Setup(pf, options.max_mem(), anchor);
+  Regexp* sre = re->Simplify();
+  if (sre == NULL)
+    return NULL;
 
-  // Compile alternation of fragments.
-  Frag all = c.WalkExponential(re, kNullFrag, 2*c.max_inst_);
-  re->Decref();
+  Frag all = c.WalkExponential(sre, Frag(), 2*c.max_ninst_);
+  sre->Decref();
   if (c.failed_)
     return NULL;
 
-  if (anchor == RE2::UNANCHORED) {
-    // The trailing .* was added while handling kRegexpHaveMatch.
-    // We just have to add the leading one.
-    all = c.Cat(c.DotStar(), all);
-  }
-
-  c.prog_->set_start(all.begin);
-  c.prog_->set_start_unanchored(all.begin);
   c.prog_->set_anchor_start(true);
   c.prog_->set_anchor_end(true);
 
+  if (anchor == RE2::UNANCHORED) {
+    // Prepend .* or else the expression will effectively be anchored.
+    // Complemented by the ANCHOR_BOTH case in PostVisit().
+    all = c.Cat(c.DotStar(), all);
+  }
+  c.prog_->set_start(all.begin);
+  c.prog_->set_start_unanchored(all.begin);
+
   Prog* prog = c.Finish();
   if (prog == NULL)
     return NULL;
 
   // Make sure DFA has enough memory to operate,
   // since we're not going to fall back to the NFA.
-  bool failed;
+  bool dfa_failed = false;
   StringPiece sp = "hello, world";
   prog->SearchDFA(sp, sp, Prog::kAnchored, Prog::kManyMatch,
-                  NULL, &failed, NULL);
-  if (failed) {
+                  NULL, &dfa_failed, NULL);
+  if (dfa_failed) {
     delete prog;
     return NULL;
   }
@@ -1131,9 +1269,8 @@
   return prog;
 }
 
-Prog* Prog::CompileSet(const RE2::Options& options, RE2::Anchor anchor,
-                       Regexp* re) {
-  return Compiler::CompileSet(options, anchor, re);
+Prog* Prog::CompileSet(Regexp* re, RE2::Anchor anchor, int64_t max_mem) {
+  return Compiler::CompileSet(re, anchor, max_mem);
 }
 
 }  // namespace re2
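The reworked CompileSet above is what the public RE2::Set class ultimately calls. A minimal sketch of that usage (assumed from RE2's documented interface; the function below is a hypothetical wrapper, not part of this diff):

    #include <vector>
    #include "re2/set.h"

    bool MatchAny(const char* text, std::vector<int>* which) {
      RE2::Set set(RE2::DefaultOptions, RE2::UNANCHORED);
      set.Add("foo\\w+", NULL);   // pattern index 0
      set.Add("bar\\d+", NULL);   // pattern index 1
      if (!set.Compile())         // compiles the alternation via Prog::CompileSet
        return false;
      return set.Match(text, which);  // fills *which with indices of matching patterns
    }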
diff --git a/re2/dfa.cc b/re2/dfa.cc
index 2556c0f..89b9b77 100644
--- a/re2/dfa.cc
+++ b/re2/dfa.cc
@@ -21,15 +21,34 @@
 //
 // See http://swtch.com/~rsc/regexp/ for a very bare-bones equivalent.
 
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <deque>
+#include <mutex>
+#include <new>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "util/logging.h"
+#include "util/mix.h"
+#include "util/mutex.h"
+#include "util/pod_array.h"
+#include "util/sparse_set.h"
+#include "util/strutil.h"
 #include "re2/prog.h"
 #include "re2/stringpiece.h"
-#include "util/atomicops.h"
-#include "util/flags.h"
-#include "util/sparse_set.h"
 
-DEFINE_bool(re2_dfa_bail_when_slow, true,
-            "Whether the RE2 DFA should bail out early "
-            "if the NFA would be faster (for testing).");
+// Silence "zero-sized array in struct/union" warning for DFA::State::next_.
+#ifdef _MSC_VER
+#pragma warning(disable: 4200)
+#endif
 
 namespace re2 {
 
@@ -44,9 +63,12 @@
 }
 #endif
 
+// Controls whether the DFA should bail out early if the NFA would be faster.
+static bool dfa_should_bail_when_slow = true;
+
 // Changing this to true compiles in prints that trace execution of the DFA.
 // Generates a lot of output -- only useful for debugging.
-static const bool DebugDFA = false;
+static const bool ExtraDebug = false;
 
 // A DFA implementation of a regular expression program.
 // Since this is entirely a forward declaration mandated by C++,
@@ -54,7 +76,7 @@
 // the comments in the sections that follow the DFA definition.
 class DFA {
  public:
-  DFA(Prog* prog, Prog::MatchKind kind, int64 max_mem);
+  DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem);
   ~DFA();
   bool ok() const { return !init_failed_; }
   Prog::MatchKind kind() { return kind_; }
@@ -74,11 +96,13 @@
   //   memory), it sets *failed and returns false.
   bool Search(const StringPiece& text, const StringPiece& context,
               bool anchored, bool want_earliest_match, bool run_forward,
-              bool* failed, const char** ep, vector<int>* matches);
+              bool* failed, const char** ep, SparseSet* matches);
 
-  // Builds out all states for the entire DFA.  FOR TESTING ONLY
-  // Returns number of states.
-  int BuildAllStates();
+  // Builds out all states for the entire DFA.
+  // If cb is not empty, it receives one callback per state built.
+  // Returns the number of states built.
+  // FOR TESTING OR EXPERIMENTAL PURPOSES ONLY.
+  int BuildAllStates(const Prog::DFAStateCallback& cb);
 
   // Computes min and max for matching strings.  Won't return strings
   // bigger than maxlen.
@@ -86,101 +110,78 @@
 
   // These data structures are logically private, but C++ makes it too
   // difficult to mark them as such.
-  class Workq;
   class RWLocker;
   class StateSaver;
+  class Workq;
 
   // A single DFA state.  The DFA is represented as a graph of these
   // States, linked by the next_ pointers.  If in state s and reading
   // byte c, the next state should be s->next_[c].
   struct State {
-    inline bool IsMatch() const { return flag_ & kFlagMatch; }
-    void SaveMatch(vector<int>* v);
+    inline bool IsMatch() const { return (flag_ & kFlagMatch) != 0; }
+    void SaveMatch(std::vector<int>* v);
 
     int* inst_;         // Instruction pointers in the state.
     int ninst_;         // # of inst_ pointers.
-    uint flag_;         // Empty string bitfield flags in effect on the way
+    uint32_t flag_;     // Empty string bitfield flags in effect on the way
                         // into this state, along with kFlagMatch if this
                         // is a matching state.
-    State** next_;      // Outgoing arrows from State,
+
+// Work around the bug affecting flexible array members in GCC 6.x (for x >= 1).
+// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70932)
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && __GNUC_MINOR__ >= 1
+    std::atomic<State*> next_[0];   // Outgoing arrows from State,
+#else
+    std::atomic<State*> next_[];    // Outgoing arrows from State,
+#endif
+
                         // one per input byte class
   };
 
   enum {
     kByteEndText = 256,         // imaginary byte at end of text
 
-    kFlagEmptyMask = 0xFFF,     // State.flag_: bits holding kEmptyXXX flags
-    kFlagMatch = 0x1000,        // State.flag_: this is a matching state
-    kFlagLastWord = 0x2000,     // State.flag_: last byte was a word char
+    kFlagEmptyMask = 0xFF,      // State.flag_: bits holding kEmptyXXX flags
+    kFlagMatch = 0x0100,        // State.flag_: this is a matching state
+    kFlagLastWord = 0x0200,     // State.flag_: last byte was a word char
     kFlagNeedShift = 16,        // needed kEmpty bits are or'ed in shifted left
   };
 
-#ifndef STL_MSVC
-  // STL function structures for use with unordered_set.
+  struct StateHash {
+    size_t operator()(const State* a) const {
+      DCHECK(a != NULL);
+      HashMix mix(a->flag_);
+      for (int i = 0; i < a->ninst_; i++)
+        mix.Mix(a->inst_[i]);
+      mix.Mix(0);
+      return mix.get();
+    }
+  };
+
   struct StateEqual {
     bool operator()(const State* a, const State* b) const {
+      DCHECK(a != NULL);
+      DCHECK(b != NULL);
       if (a == b)
         return true;
-      if (a == NULL || b == NULL)
+      if (a->flag_ != b->flag_)
         return false;
       if (a->ninst_ != b->ninst_)
         return false;
-      if (a->flag_ != b->flag_)
-        return false;
       for (int i = 0; i < a->ninst_; i++)
         if (a->inst_[i] != b->inst_[i])
           return false;
-      return true;  // they're equal
+      return true;
     }
   };
-#endif  // STL_MSVC
-  struct StateHash {
-    size_t operator()(const State* a) const {
-      if (a == NULL)
-        return 0;
-      const char* s = reinterpret_cast<const char*>(a->inst_);
-      int len = a->ninst_ * sizeof a->inst_[0];
-      if (sizeof(size_t) == sizeof(uint32))
-        return Hash32StringWithSeed(s, len, a->flag_);
-      else
-        return Hash64StringWithSeed(s, len, a->flag_);
-    }
-#ifdef STL_MSVC
-    // Less than operator.
-    bool operator()(const State* a, const State* b) const {
-      if (a == b)
-        return false;
-      if (a == NULL || b == NULL)
-        return a == NULL;
-      if (a->ninst_ != b->ninst_)
-        return a->ninst_ < b->ninst_;
-      if (a->flag_ != b->flag_)
-        return a->flag_ < b->flag_;
-      for (int i = 0; i < a->ninst_; ++i)
-        if (a->inst_[i] != b->inst_[i])
-          return a->inst_[i] < b->inst_[i];
-      return false;  // they're equal
-    }
-    // The two public members are required by msvc. 4 and 8 are default values.
-    // Reference: http://msdn.microsoft.com/en-us/library/1s1byw77.aspx
-    static const size_t bucket_size = 4;
-    static const size_t min_buckets = 8;
-#endif  // STL_MSVC
-  };
 
-#ifdef STL_MSVC
-  typedef unordered_set<State*, StateHash> StateSet;
-#else  // !STL_MSVC
-  typedef unordered_set<State*, StateHash, StateEqual> StateSet;
-#endif  // STL_MSVC
-
+  typedef std::unordered_set<State*, StateHash, StateEqual> StateSet;
 
  private:
-  // Special "firstbyte" values for a state.  (Values >= 0 denote actual bytes.)
+  // Special "first_byte" values for a state.  (Values >= 0 denote actual bytes.)
   enum {
     kFbUnknown = -1,   // No analysis has been performed.
-    kFbMany = -2,      // Many bytes will lead out of this state.
-    kFbNone = -3,      // No bytes lead out of this state.
+    kFbNone = -2,      // The first-byte trick cannot be used.
   };
 
   enum {
@@ -205,11 +206,11 @@
 
   // Looks up and returns the State corresponding to a Workq.
   // L >= mutex_
-  State* WorkqToCachedState(Workq* q, uint flag);
+  State* WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag);
 
   // Looks up and returns a State matching the inst, ninst, and flag.
   // L >= mutex_
-  State* CachedState(int* inst, int ninst, uint flag);
+  State* CachedState(int* inst, int ninst, uint32_t flag);
 
   // Clear the cache entirely.
   // Must hold cache_mutex_.w or be in destructor.
@@ -217,7 +218,7 @@
 
   // Converts a State into a Workq: the opposite of WorkqToCachedState.
   // L >= mutex_
-  static void StateToWorkq(State* s, Workq* q);
+  void StateToWorkq(State* s, Workq* q);
 
   // Runs a State on a given byte, returning the next state.
   State* RunStateOnByteUnlocked(State*, int);  // cache_mutex_.r <= L < mutex_
@@ -228,18 +229,16 @@
   // sets *ismatch to true.
   // L >= mutex_
   void RunWorkqOnByte(Workq* q, Workq* nq,
-                             int c, uint flag, bool* ismatch,
-                             Prog::MatchKind kind,
-                             int new_byte_loop);
+                      int c, uint32_t flag, bool* ismatch);
 
   // Runs a Workq on a set of empty-string flags, producing a new Workq in nq.
   // L >= mutex_
-  void RunWorkqOnEmptyString(Workq* q, Workq* nq, uint flag);
+  void RunWorkqOnEmptyString(Workq* q, Workq* nq, uint32_t flag);
 
   // Adds the instruction id to the Workq, following empty arrows
   // according to flag.
   // L >= mutex_
-  void AddToQueue(Workq* q, int id, uint flag);
+  void AddToQueue(Workq* q, int id, uint32_t flag);
 
   // For debugging, returns a text representation of State.
   static string DumpState(State* state);
@@ -256,7 +255,7 @@
         want_earliest_match(false),
         run_forward(false),
         start(NULL),
-        firstbyte(kFbUnknown),
+        first_byte(kFbUnknown),
         cache_lock(cache_lock),
         failed(false),
         ep(NULL),
@@ -268,37 +267,39 @@
     bool want_earliest_match;
     bool run_forward;
     State* start;
-    int firstbyte;
+    int first_byte;
     RWLocker *cache_lock;
     bool failed;     // "out" parameter: whether search gave up
     const char* ep;  // "out" parameter: end pointer for match
-    vector<int>* matches;
+    SparseSet* matches;
 
    private:
-    DISALLOW_EVIL_CONSTRUCTORS(SearchParams);
+    SearchParams(const SearchParams&) = delete;
+    SearchParams& operator=(const SearchParams&) = delete;
   };
 
   // Before each search, the parameters to Search are analyzed by
   // AnalyzeSearch to determine the state in which to start and the
-  // "firstbyte" for that state, if any.
+  // "first_byte" for that state, if any.
   struct StartInfo {
-    StartInfo() : start(NULL), firstbyte(kFbUnknown) { }
+    StartInfo() : start(NULL), first_byte(kFbUnknown) {}
     State* start;
-    volatile int firstbyte;
+    std::atomic<int> first_byte;
   };
 
-  // Fills in params->start and params->firstbyte using
+  // Fills in params->start and params->first_byte using
   // the other search parameters.  Returns true on success,
   // false on failure.
   // cache_mutex_.r <= L < mutex_
   bool AnalyzeSearch(SearchParams* params);
-  bool AnalyzeSearchHelper(SearchParams* params, StartInfo* info, uint flags);
+  bool AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
+                           uint32_t flags);
 
   // The generic search loop, inlined to create specialized versions.
   // cache_mutex_.r <= L < mutex_
   // Might unlock and relock cache_mutex_ via params->cache_lock.
   inline bool InlinedSearchLoop(SearchParams* params,
-                                bool have_firstbyte,
+                                bool have_first_byte,
                                 bool want_earliest_match,
                                 bool run_forward);
 
@@ -340,7 +341,6 @@
   // Constant after initialization.
   Prog* prog_;              // The regular expression program to run.
   Prog::MatchKind kind_;    // The kind of DFA.
-  int start_unanchored_;  // start of unanchored program
   bool init_failed_;        // initialization failed (out of memory)
 
   Mutex mutex_;  // mutex_ >= cache_mutex_.r
@@ -348,8 +348,7 @@
   // Scratch areas, protected by mutex_.
   Workq* q0_;             // Two pre-allocated work queues.
   Workq* q1_;
-  int* astack_;         // Pre-allocated stack for AddToQueue
-  int nastack_;
+  PODArray<int> stack_;   // Pre-allocated stack for AddToQueue
 
   // State* cache.  Many threads use and add to the cache simultaneously,
   // holding cache_mutex_ for reading and mutex_ (above) when adding.
@@ -358,16 +357,15 @@
   // readers.  Any State* pointers are only valid while cache_mutex_
   // is held.
   Mutex cache_mutex_;
-  int64 mem_budget_;       // Total memory budget for all States.
-  int64 state_budget_;     // Amount of memory remaining for new States.
+  int64_t mem_budget_;     // Total memory budget for all States.
+  int64_t state_budget_;   // Amount of memory remaining for new States.
   StateSet state_cache_;   // All States computed so far.
   StartInfo start_[kMaxStart];
-  bool cache_warned_;      // have printed to LOG(INFO) about the cache
 };
 
-// Shorthand for casting to uint8*.
-static inline const uint8* BytePtr(const void* v) {
-  return reinterpret_cast<const uint8*>(v);
+// Shorthand for casting to uint8_t*.
+static inline const uint8_t* BytePtr(const void* v) {
+  return reinterpret_cast<const uint8_t*>(v);
 }
 
 // Work queues
@@ -376,6 +374,10 @@
 // in the work queue when in leftmost-longest matching mode.
 #define Mark (-1)
 
+// Separates the match IDs from the instructions in inst_.
+// Used only for "many match" DFA states.
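+// A "many match" state's inst_ array therefore ends with the match IDs,
+// e.g. {5, 11, MatchSep, 0, 2}: instruction IDs, then MatchSep, then match IDs.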
+#define MatchSep (-2)
+
 // Internally, the DFA uses a sparse array of
 // program instruction pointers as a work queue.
 // In leftmost longest mode, marks separate sections
@@ -428,36 +430,35 @@
   int maxmark_;          // maximum number of marks
   int nextmark_;         // id of next mark
   bool last_was_mark_;   // last inserted was mark
-  DISALLOW_EVIL_CONSTRUCTORS(Workq);
+
+  Workq(const Workq&) = delete;
+  Workq& operator=(const Workq&) = delete;
 };
 
-DFA::DFA(Prog* prog, Prog::MatchKind kind, int64 max_mem)
+DFA::DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem)
   : prog_(prog),
     kind_(kind),
     init_failed_(false),
     q0_(NULL),
     q1_(NULL),
-    astack_(NULL),
-    mem_budget_(max_mem),
-    cache_warned_(false) {
-  if (DebugDFA)
+    mem_budget_(max_mem) {
+  if (ExtraDebug)
     fprintf(stderr, "\nkind %d\n%s\n", (int)kind_, prog_->DumpUnanchored().c_str());
   int nmark = 0;
-  start_unanchored_ = 0;
-  if (kind_ == Prog::kLongestMatch) {
-    nmark = prog->size();
-    start_unanchored_ = prog->start_unanchored();
-  }
-  nastack_ = 2 * prog->size() + nmark;
+  if (kind_ == Prog::kLongestMatch)
+    nmark = prog_->size();
+  // See DFA::AddToQueue() for why this is so.
+  int nstack = prog_->inst_count(kInstCapture) +
+               prog_->inst_count(kInstEmptyWidth) +
+               prog_->inst_count(kInstNop) +
+               nmark + 1;  // + 1 for start inst
 
-  // Account for space needed for DFA, q0, q1, astack.
+  // Account for space needed for DFA, q0, q1, stack.
   mem_budget_ -= sizeof(DFA);
   mem_budget_ -= (prog_->size() + nmark) *
                  (sizeof(int)+sizeof(int)) * 2;  // q0, q1
-  mem_budget_ -= nastack_ * sizeof(int);  // astack
+  mem_budget_ -= nstack * sizeof(int);  // stack
   if (mem_budget_ < 0) {
-    LOG(INFO) << StringPrintf("DFA out of memory: prog size %lld mem %lld",
-                              prog_->size(), max_mem);
     init_failed_ = true;
     return;
   }
@@ -468,24 +469,24 @@
   // At minimum, the search requires room for two states in order
   // to limp along, restarting frequently.  We'll get better performance
   // if there is room for a larger number of states, say 20.
-  int64 one_state = sizeof(State) + (prog_->size()+nmark)*sizeof(int) +
-                    (prog_->bytemap_range()+1)*sizeof(State*);
+  // Note that a state stores list heads only, so we use the program
+  // list count for the upper bound, not the program size.
+  int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
+  int64_t one_state = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
+                      (prog_->list_count()+nmark)*sizeof(int);
   if (state_budget_ < 20*one_state) {
-    LOG(INFO) << StringPrintf("DFA out of memory: prog size %lld mem %lld",
-                              prog_->size(), max_mem);
     init_failed_ = true;
     return;
   }
 
-  q0_ = new Workq(prog->size(), nmark);
-  q1_ = new Workq(prog->size(), nmark);
-  astack_ = new int[nastack_];
+  q0_ = new Workq(prog_->size(), nmark);
+  q1_ = new Workq(prog_->size(), nmark);
+  stack_ = PODArray<int>(nstack);
 }
 
 DFA::~DFA() {
   delete q0_;
   delete q1_;
-  delete[] astack_;
   ClearCache();
 }
 
@@ -507,7 +508,7 @@
 string DFA::DumpWorkq(Workq* q) {
   string s;
   const char* sep = "";
-  for (DFA::Workq::iterator it = q->begin(); it != q->end(); ++it) {
+  for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
     if (q->is_mark(*it)) {
       StringAppendF(&s, "|");
       sep = "";
@@ -534,6 +535,9 @@
     if (state->inst_[i] == Mark) {
       StringAppendF(&s, "|");
       sep = "";
+    } else if (state->inst_[i] == MatchSep) {
+      StringAppendF(&s, "||");
+      sep = "";
     } else {
       StringAppendF(&s, "%s%d", sep, state->inst_[i]);
       sep = ",";
@@ -601,9 +605,10 @@
 // Looks in the State cache for a State matching q, flag.
 // If one is found, returns it.  If one is not found, allocates one,
 // inserts it in the cache, and returns it.
-DFA::State* DFA::WorkqToCachedState(Workq* q, uint flag) {
-  if (DEBUG_MODE)
-    mutex_.AssertHeld();
+// If mq is not null, MatchSep and the match IDs in mq will be appended
+// to the State.
+DFA::State* DFA::WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag) {
+  //mutex_.AssertHeld();
 
   // Construct array of instruction ids for the new state.
   // Only ByteRange, EmptyWidth, and Match instructions are useful to keep:
@@ -611,10 +616,10 @@
   // RunWorkqOnEmptyString or RunWorkqOnByte.
   int* inst = new int[q->size()];
   int n = 0;
-  uint needflags = 0;     // flags needed by kInstEmptyWidth instructions
-  bool sawmatch = false;  // whether queue contains guaranteed kInstMatch
-  bool sawmark = false;  // whether queue contains a Mark
-  if (DebugDFA)
+  uint32_t needflags = 0;  // flags needed by kInstEmptyWidth instructions
+  bool sawmatch = false;   // whether queue contains guaranteed kInstMatch
+  bool sawmark = false;    // whether queue contains a Mark
+  if (ExtraDebug)
     fprintf(stderr, "WorkqToCachedState %s [%#x]", DumpWorkq(q).c_str(), flag);
   for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
     int id = *it;
@@ -640,36 +645,22 @@
             (kind_ != Prog::kLongestMatch || !sawmark) &&
             (flag & kFlagMatch)) {
           delete[] inst;
-          if (DebugDFA)
+          if (ExtraDebug)
             fprintf(stderr, " -> FullMatchState\n");
           return FullMatchState;
         }
-        // Fall through.
-      case kInstByteRange:    // These are useful.
-      case kInstEmptyWidth:
-      case kInstMatch:
-      case kInstAlt:          // Not useful, but necessary [*]
-        inst[n++] = *it;
+        FALLTHROUGH_INTENDED;
+      default:
+        // Record iff id is the head of its list, which must
+        // be the case if id-1 is the last of *its* list. :)
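+        // (Lists are laid out contiguously after flattening, so checking
+        // whether the previous instruction is last() is enough.)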
+        if (prog_->inst(id-1)->last())
+          inst[n++] = *it;
         if (ip->opcode() == kInstEmptyWidth)
           needflags |= ip->empty();
         if (ip->opcode() == kInstMatch && !prog_->anchor_end())
           sawmatch = true;
         break;
-
-      default:                // The rest are not.
-        break;
     }
-
-    // [*] kInstAlt would seem useless to record in a state, since
-    // we've already followed both its arrows and saved all the
-    // interesting states we can reach from there.  The problem
-    // is that one of the empty-width instructions might lead
-    // back to the same kInstAlt (if an empty-width operator is starred),
-    // producing a different evaluation order depending on whether
-    // we keep the kInstAlt to begin with.  Sigh.
-    // A specific case that this affects is /(^|a)+/ matching "a".
-    // If we don't save the kInstAlt, we will match the whole "a" (0,1)
-    // but in fact the correct leftmost-first match is the leading "" (0,0).
   }
   DCHECK_LE(n, q->size());
   if (n > 0 && inst[n-1] == Mark)
@@ -701,7 +692,7 @@
   // if the state is *not* a matching state.
   if (n == 0 && flag == 0) {
     delete[] inst;
-    if (DebugDFA)
+    if (ExtraDebug)
       fprintf(stderr, " -> DeadState\n");
     return DeadState;
   }
@@ -716,13 +707,24 @@
       int* markp = ip;
       while (markp < ep && *markp != Mark)
         markp++;
-      sort(ip, markp);
+      std::sort(ip, markp);
       if (markp < ep)
         markp++;
       ip = markp;
     }
   }
 
+  // Append MatchSep and the match IDs in mq if necessary.
+  if (mq != NULL) {
+    inst[n++] = MatchSep;
+    for (Workq::iterator i = mq->begin(); i != mq->end(); ++i) {
+      int id = *i;
+      Prog::Inst* ip = prog_->inst(id);
+      if (ip->opcode() == kInstMatch)
+        inst[n++] = ip->match_id();
+    }
+  }
+
   // Save the needed empty-width flags in the top bits for use later.
   flag |= needflags << kFlagNeedShift;
 
@@ -734,42 +736,50 @@
 // Looks in the State cache for a State matching inst, ninst, flag.
 // If one is found, returns it.  If one is not found, allocates one,
 // inserts it in the cache, and returns it.
-DFA::State* DFA::CachedState(int* inst, int ninst, uint flag) {
-  if (DEBUG_MODE)
-    mutex_.AssertHeld();
+DFA::State* DFA::CachedState(int* inst, int ninst, uint32_t flag) {
+  //mutex_.AssertHeld();
 
   // Look in the cache for a pre-existing state.
-  State state = { inst, ninst, flag, NULL };
+  // We have to initialise the struct like this because otherwise
+  // MSVC will complain about the flexible array member. :(
+  State state;
+  state.inst_ = inst;
+  state.ninst_ = ninst;
+  state.flag_ = flag;
   StateSet::iterator it = state_cache_.find(&state);
   if (it != state_cache_.end()) {
-    if (DebugDFA)
+    if (ExtraDebug)
       fprintf(stderr, " -cached-> %s\n", DumpState(*it).c_str());
     return *it;
   }
 
   // Must have enough memory for new state.
   // In addition to what we're going to allocate,
-  // the state cache hash table seems to incur about 32 bytes per
+  // the state cache hash table seems to incur about 40 bytes per
   // State*, empirically.
-  const int kStateCacheOverhead = 32;
+  const int kStateCacheOverhead = 40;
   int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
-  int mem = sizeof(State) + nnext*sizeof(State*) + ninst*sizeof(int);
+  int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
+            ninst*sizeof(int);
   if (mem_budget_ < mem + kStateCacheOverhead) {
     mem_budget_ = -1;
     return NULL;
   }
   mem_budget_ -= mem + kStateCacheOverhead;
 
-  // Allocate new state, along with room for next and inst.
-  char* space = new char[mem];
-  State* s = reinterpret_cast<State*>(space);
-  s->next_ = reinterpret_cast<State**>(s + 1);
-  s->inst_ = reinterpret_cast<int*>(s->next_ + nnext);
-  memset(s->next_, 0, nnext*sizeof s->next_[0]);
+  // Allocate new state along with room for next_ and inst_.
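+  // The blob is laid out as the State header, then the nnext next_
+  // pointers, then the ninst inst_ entries.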
+  char* space = std::allocator<char>().allocate(mem);
+  State* s = new (space) State;
+  (void) new (s->next_) std::atomic<State*>[nnext];
+  // Work around an unfortunate bug in older versions of libstdc++.
+  // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64658)
+  for (int i = 0; i < nnext; i++)
+    (void) new (s->next_ + i) std::atomic<State*>(NULL);
+  s->inst_ = new (s->next_ + nnext) int[ninst];
   memmove(s->inst_, inst, ninst*sizeof s->inst_[0]);
   s->ninst_ = ninst;
   s->flag_ = flag;
-  if (DebugDFA)
+  if (ExtraDebug)
     fprintf(stderr, " -> %s\n", DumpState(s).c_str());
 
   // Put state in cache and return it.
@@ -779,47 +789,59 @@
 
 // Clear the cache.  Must hold cache_mutex_.w or be in destructor.
 void DFA::ClearCache() {
-  // In case state_cache_ doesn't support deleting entries
-  // during iteration, copy into a vector and then delete.
-  vector<State*> v;
-  v.reserve(state_cache_.size());
-  for (StateSet::iterator it = state_cache_.begin();
-       it != state_cache_.end(); ++it)
-    v.push_back(*it);
+  StateSet::iterator begin = state_cache_.begin();
+  StateSet::iterator end = state_cache_.end();
+  while (begin != end) {
+    StateSet::iterator tmp = begin;
+    ++begin;
+    // Deallocate the blob of memory that we allocated in DFA::CachedState().
+    // We recompute mem in order to benefit from sized delete where possible.
+    int ninst = (*tmp)->ninst_;
+    int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
+    int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
+              ninst*sizeof(int);
+    std::allocator<char>().deallocate(reinterpret_cast<char*>(*tmp), mem);
+  }
   state_cache_.clear();
-  for (int i = 0; i < v.size(); i++)
-    delete[] reinterpret_cast<const char*>(v[i]);
 }
 
 // Copies insts in state s to the work queue q.
 void DFA::StateToWorkq(State* s, Workq* q) {
   q->clear();
   for (int i = 0; i < s->ninst_; i++) {
-    if (s->inst_[i] == Mark)
+    if (s->inst_[i] == Mark) {
       q->mark();
-    else
-      q->insert_new(s->inst_[i]);
+    } else if (s->inst_[i] == MatchSep) {
+      // Nothing after this is an instruction!
+      break;
+    } else {
+      // Explore from the head of the list.
+      AddToQueue(q, s->inst_[i], s->flag_ & kFlagEmptyMask);
+    }
   }
 }
 
-// Adds ip to the work queue, following empty arrows according to flag
-// and expanding kInstAlt instructions (two-target gotos).
-void DFA::AddToQueue(Workq* q, int id, uint flag) {
+// Adds ip to the work queue, following empty arrows according to flag.
+void DFA::AddToQueue(Workq* q, int id, uint32_t flag) {
 
-  // Use astack_ to hold our stack of states yet to process.
-  // It is sized to have room for nastack_ == 2*prog->size() + nmark
-  // instructions, which is enough: each instruction can be
-  // processed by the switch below only once, and the processing
-  // pushes at most two instructions plus maybe a mark.
-  // (If we're using marks, nmark == prog->size(); otherwise nmark == 0.)
-  int* stk = astack_;
+  // Use stack_ to hold our stack of instructions yet to process.
+  // It was preallocated as follows:
+  //   one entry per Capture;
+  //   one entry per EmptyWidth; and
+  //   one entry per Nop.
+  // This reflects the maximum number of stack pushes that each can
+  // perform. (Each instruction can be processed at most once.)
+  // When using marks, we also added nmark == prog_->size().
+  // (Otherwise, nmark == 0.)
+  int* stk = stack_.data();
   int nstk = 0;
 
   stk[nstk++] = id;
   while (nstk > 0) {
-    DCHECK_LE(nstk, nastack_);
+    DCHECK_LE(nstk, stack_.size());
     id = stk[--nstk];
 
+  Loop:
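+    // Reached via goto: instead of pushing the next instruction to visit
+    // onto the stack and popping it again, the cases below jump straight
+    // back here with the new id.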
     if (id == Mark) {
       q->mark();
       continue;
@@ -829,9 +851,8 @@
       continue;
 
     // If ip is already on the queue, nothing to do.
-    // Otherwise add it.  We don't actually keep all the ones
-    // that get added -- for example, kInstAlt is ignored
-    // when on a work queue -- but adding all ip's here
+    // Otherwise add it.  We don't actually keep all the
+    // ones that get added, but adding all of them here
     // increases the likelihood of q->contains(id),
     // reducing the amount of duplicated work.
     if (q->contains(id))
@@ -841,37 +862,46 @@
     // Process instruction.
     Prog::Inst* ip = prog_->inst(id);
     switch (ip->opcode()) {
-      case kInstFail:       // can't happen: discarded above
+      default:
+        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
         break;
 
       case kInstByteRange:  // just save these on the queue
       case kInstMatch:
-        break;
+        if (ip->last())
+          break;
+        id = id+1;
+        goto Loop;
 
       case kInstCapture:    // DFA treats captures as no-ops.
       case kInstNop:
-        stk[nstk++] = ip->out();
-        break;
+        if (!ip->last())
+          stk[nstk++] = id+1;
 
-      case kInstAlt:        // two choices: expand both, in order
-      case kInstAltMatch:
-        // Want to visit out then out1, so push on stack in reverse order.
-        // This instruction is the [00-FF]* loop at the beginning of
-        // a leftmost-longest unanchored search, separate out from out1
-        // with a Mark, so that out1's threads (which will start farther
-        // to the right in the string being searched) are lower priority
-        // than the current ones.
-        stk[nstk++] = ip->out1();
-        if (q->maxmark() > 0 &&
+        // If this instruction is the [00-FF]* loop at the beginning of
+        // a leftmost-longest unanchored search, separate with a Mark so
+        // that future threads (which will start farther to the right in
+        // the input string) are lower priority than current threads.
+        if (ip->opcode() == kInstNop && q->maxmark() > 0 &&
             id == prog_->start_unanchored() && id != prog_->start())
           stk[nstk++] = Mark;
-        stk[nstk++] = ip->out();
-        break;
+        id = ip->out();
+        goto Loop;
+
+      case kInstAltMatch:
+        DCHECK(!ip->last());
+        id = id+1;
+        goto Loop;
 
       case kInstEmptyWidth:
-        if ((ip->empty() & flag) == ip->empty())
-          stk[nstk++] = ip->out();
-        break;
+        if (!ip->last())
+          stk[nstk++] = id+1;
+
+        // Continue on if we have all the right flag bits.
+        if (ip->empty() & ~flag)
+          break;
+        id = ip->out();
+        goto Loop;
     }
   }
 }
@@ -892,7 +922,7 @@
 // and then processing only $.  Doing the two-step sequence won't match
 // ^$^$^$ but processing ^ and $ simultaneously will (and is the behavior
 // exhibited by existing implementations).
-void DFA::RunWorkqOnEmptyString(Workq* oldq, Workq* newq, uint flag) {
+void DFA::RunWorkqOnEmptyString(Workq* oldq, Workq* newq, uint32_t flag) {
   newq->clear();
   for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
     if (oldq->is_mark(*i))
@@ -907,11 +937,8 @@
 // means to match c$.  Sets the bool *ismatch to true if the end of the
 // regular expression program has been reached (the regexp has matched).
 void DFA::RunWorkqOnByte(Workq* oldq, Workq* newq,
-                         int c, uint flag, bool* ismatch,
-                         Prog::MatchKind kind,
-                         int new_byte_loop) {
-  if (DEBUG_MODE)
-    mutex_.AssertHeld();
+                         int c, uint32_t flag, bool* ismatch) {
+  //mutex_.AssertHeld();
 
   newq->clear();
   for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
@@ -924,10 +951,13 @@
     int id = *i;
     Prog::Inst* ip = prog_->inst(id);
     switch (ip->opcode()) {
+      default:
+        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
+        break;
+
       case kInstFail:        // never succeeds
       case kInstCapture:     // already followed
       case kInstNop:         // already followed
-      case kInstAlt:         // already followed
       case kInstAltMatch:    // already followed
       case kInstEmptyWidth:  // already followed
         break;
@@ -938,10 +968,11 @@
         break;
 
       case kInstMatch:
-        if (prog_->anchor_end() && c != kByteEndText)
+        if (prog_->anchor_end() && c != kByteEndText &&
+            kind_ != Prog::kManyMatch)
           break;
         *ismatch = true;
-        if (kind == Prog::kFirstMatch) {
+        if (kind_ == Prog::kFirstMatch) {
           // Can stop processing work queue since we found a match.
           return;
         }
@@ -949,7 +980,7 @@
     }
   }
 
-  if (DebugDFA)
+  if (ExtraDebug)
     fprintf(stderr, "%s on %d[%#x] -> %s [%d]\n", DumpWorkq(oldq).c_str(),
             c, flag, DumpWorkq(newq).c_str(), *ismatch);
 }
@@ -965,8 +996,8 @@
 
 // Processes input byte c in state, returning new state.
 DFA::State* DFA::RunStateOnByte(State* state, int c) {
-  if (DEBUG_MODE)
-    mutex_.AssertHeld();
+  //mutex_.AssertHeld();
+
   if (state <= SpecialStateMax) {
     if (state == FullMatchState) {
       // It is convenient for routines like PossibleMatchRange
@@ -988,9 +1019,7 @@
   }
 
   // If someone else already computed this, return it.
-  MaybeReadMemoryBarrier(); // On alpha we need to ensure read ordering
-  State* ns = state->next_[ByteMap(c)];
-  ANNOTATE_HAPPENS_AFTER(ns);
+  State* ns = state->next_[ByteMap(c)].load(std::memory_order_relaxed);
   if (ns != NULL)
     return ns;
 
@@ -1001,10 +1030,10 @@
   // around this byte.  Before the byte we have the flags recorded
   // in the State structure itself.  After the byte we have
   // nothing yet (but that will change: read on).
-  uint needflag = state->flag_ >> kFlagNeedShift;
-  uint beforeflag = state->flag_ & kFlagEmptyMask;
-  uint oldbeforeflag = beforeflag;
-  uint afterflag = 0;
+  uint32_t needflag = state->flag_ >> kFlagNeedShift;
+  uint32_t beforeflag = state->flag_ & kFlagEmptyMask;
+  uint32_t oldbeforeflag = beforeflag;
+  uint32_t afterflag = 0;
 
   if (c == '\n') {
     // Insert implicit $ and ^ around \n
@@ -1020,8 +1049,8 @@
   // The state flag kFlagLastWord says whether the last
   // byte processed was a word character.  Use that info to
   // insert empty-width (non-)word boundaries.
-  bool islastword = state->flag_ & kFlagLastWord;
-  bool isword = (c != kByteEndText && Prog::IsWordChar(c));
+  bool islastword = (state->flag_ & kFlagLastWord) != 0;
+  bool isword = c != kByteEndText && Prog::IsWordChar(static_cast<uint8_t>(c));
   if (isword == islastword)
     beforeflag |= kEmptyNonWordBoundary;
   else
@@ -1031,43 +1060,31 @@
   // Only useful to rerun on empty string if there are new, useful flags.
   if (beforeflag & ~oldbeforeflag & needflag) {
     RunWorkqOnEmptyString(q0_, q1_, beforeflag);
+    using std::swap;
     swap(q0_, q1_);
   }
   bool ismatch = false;
-  RunWorkqOnByte(q0_, q1_, c, afterflag, &ismatch, kind_, start_unanchored_);
-  
-  // Most of the time, we build the state from the output of
-  // RunWorkqOnByte, so swap q0_ and q1_ here.  However, so that
-  // RE2::Set can tell exactly which match instructions
-  // contributed to the match, don't swap if c is kByteEndText.
-  // The resulting state wouldn't be correct for further processing
-  // of the string, but we're at the end of the text so that's okay.
-  // Leaving q0_ alone preseves the match instructions that led to
-  // the current setting of ismatch.
-  if (c != kByteEndText || kind_ != Prog::kManyMatch)
-    swap(q0_, q1_);
+  RunWorkqOnByte(q0_, q1_, c, afterflag, &ismatch);
+  using std::swap;
+  swap(q0_, q1_);
 
   // Save afterflag along with ismatch and isword in new state.
-  uint flag = afterflag;
+  uint32_t flag = afterflag;
   if (ismatch)
     flag |= kFlagMatch;
   if (isword)
     flag |= kFlagLastWord;
 
-  ns = WorkqToCachedState(q0_, flag);
+  if (ismatch && kind_ == Prog::kManyMatch)
+    ns = WorkqToCachedState(q0_, q1_, flag);
+  else
+    ns = WorkqToCachedState(q0_, NULL, flag);
 
+  // Flush ns before linking to it.
   // Write barrier before updating state->next_ so that the
   // main search loop can proceed without any locking, for speed.
   // (Otherwise it would need one mutex operation per input byte.)
-  // The annotations below tell race detectors that:
-  //   a) the access to next_ should be ignored,
-  //   b) 'ns' is properly published.
-  WriteMemoryBarrier();  // Flush ns before linking to it.
-
-  ANNOTATE_IGNORE_WRITES_BEGIN();
-  ANNOTATE_HAPPENS_BEFORE(ns);
-  state->next_[ByteMap(c)] = ns;
-  ANNOTATE_IGNORE_WRITES_END();
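+  // This release pairs with the memory_order_acquire loads in
+  // InlinedSearchLoop.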
+  state->next_[ByteMap(c)].store(ns, std::memory_order_release);
   return ns;
 }
 
@@ -1101,21 +1118,15 @@
   // Notice that the lock is *released* temporarily.
   void LockForWriting();
 
-  // Returns whether the lock is already held for writing.
-  bool IsLockedForWriting() {
-    return writing_;
-  }
-
  private:
   Mutex* mu_;
   bool writing_;
 
-  DISALLOW_EVIL_CONSTRUCTORS(RWLocker);
+  RWLocker(const RWLocker&) = delete;
+  RWLocker& operator=(const RWLocker&) = delete;
 };
 
-DFA::RWLocker::RWLocker(Mutex* mu)
-  : mu_(mu), writing_(false) {
-
+DFA::RWLocker::RWLocker(Mutex* mu) : mu_(mu), writing_(false) {
   mu_->ReaderLock();
 }
 
@@ -1124,16 +1135,16 @@
 void DFA::RWLocker::LockForWriting() NO_THREAD_SAFETY_ANALYSIS {
   if (!writing_) {
     mu_->ReaderUnlock();
-    mu_->Lock();
+    mu_->WriterLock();
     writing_ = true;
   }
 }
 
 DFA::RWLocker::~RWLocker() {
-  if (writing_)
-    mu_->WriterUnlock();
-  else
+  if (!writing_)
     mu_->ReaderUnlock();
+  else
+    mu_->WriterUnlock();
 }
 
 
@@ -1150,24 +1161,12 @@
 
 void DFA::ResetCache(RWLocker* cache_lock) {
   // Re-acquire the cache_mutex_ for writing (exclusive use).
-  bool was_writing = cache_lock->IsLockedForWriting();
   cache_lock->LockForWriting();
 
-  // If we already held cache_mutex_ for writing, it means
-  // this invocation of Search() has already reset the
-  // cache once already.  That's a pretty clear indication
-  // that the cache is too small.  Warn about that, once.
-  // TODO(rsc): Only warn if state_cache_.size() < some threshold.
-  if (was_writing && !cache_warned_) {
-    LOG(INFO) << "DFA memory cache could be too small: "
-              << "only room for " << state_cache_.size() << " states.";
-    cache_warned_ = true;
-  }
-
   // Clear the cache, reset the memory budget.
   for (int i = 0; i < kMaxStart; i++) {
     start_[i].start = NULL;
-    start_[i].firstbyte = kFbUnknown;
+    start_[i].first_byte.store(kFbUnknown, std::memory_order_relaxed);
   }
   ClearCache();
   mem_budget_ = state_budget_;
@@ -1206,11 +1205,12 @@
   DFA* dfa_;         // the DFA to use
   int* inst_;        // saved info from State
   int ninst_;
-  uint flag_;
+  uint32_t flag_;
   bool is_special_;  // whether original state was special
   State* special_;   // if is_special_, the original state
 
-  DISALLOW_EVIL_CONSTRUCTORS(StateSaver);
+  StateSaver(const StateSaver&) = delete;
+  StateSaver& operator=(const StateSaver&) = delete;
 };
 
 DFA::StateSaver::StateSaver(DFA* dfa, State* state) {
@@ -1283,7 +1283,7 @@
 // Instead, it can call memchr to search very quickly for the byte c.
 // Whether the start state has this property is determined during a
 // pre-compilation pass, and if so, the byte b is passed to the search
-// loop as the "firstbyte" argument, along with a boolean "have_firstbyte".
+// loop as the "first_byte" argument, along with a boolean "have_first_byte".
 //
 // Fourth, the desired behavior is to search for the leftmost-best match
 // (approximately, the same one that Perl would find), which is not
@@ -1316,25 +1316,40 @@
 // making them function arguments lets the inliner specialize
 // this function to each combination (see two paragraphs above).
 inline bool DFA::InlinedSearchLoop(SearchParams* params,
-                                   bool have_firstbyte,
+                                   bool have_first_byte,
                                    bool want_earliest_match,
                                    bool run_forward) {
   State* start = params->start;
-  const uint8* bp = BytePtr(params->text.begin());  // start of text
-  const uint8* p = bp;                              // text scanning point
-  const uint8* ep = BytePtr(params->text.end());    // end of text
-  const uint8* resetp = NULL;                       // p at last cache reset
-  if (!run_forward)
+  const uint8_t* bp = BytePtr(params->text.begin());  // start of text
+  const uint8_t* p = bp;                              // text scanning point
+  const uint8_t* ep = BytePtr(params->text.end());    // end of text
+  const uint8_t* resetp = NULL;                       // p at last cache reset
+  if (!run_forward) {
+    using std::swap;
     swap(p, ep);
+  }
 
-  const uint8* bytemap = prog_->bytemap();
-  const uint8* lastmatch = NULL;   // most recent matching position in text
+  const uint8_t* bytemap = prog_->bytemap();
+  const uint8_t* lastmatch = NULL;   // most recent matching position in text
   bool matched = false;
+
   State* s = start;
+  if (ExtraDebug)
+    fprintf(stderr, "@stx: %s\n", DumpState(s).c_str());
 
   if (s->IsMatch()) {
     matched = true;
     lastmatch = p;
+    if (ExtraDebug)
+      fprintf(stderr, "match @stx! [%s]\n", DumpState(s).c_str());
+    if (params->matches != NULL && kind_ == Prog::kManyMatch) {
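+      // The match IDs sit at the end of inst_, after MatchSep, so walk
+      // backwards until MatchSep is reached.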
+      for (int i = s->ninst_ - 1; i >= 0; i--) {
+        int id = s->inst_[i];
+        if (id == MatchSep)
+          break;
+        params->matches->insert(id);
+      }
+    }
     if (want_earliest_match) {
       params->ep = reinterpret_cast<const char*>(lastmatch);
       return true;
@@ -1342,21 +1357,22 @@
   }
 
   while (p != ep) {
-    if (DebugDFA)
-      fprintf(stderr, "@%d: %s\n", static_cast<int>(p - bp),
-              DumpState(s).c_str());
-    if (have_firstbyte && s == start) {
-      // In start state, only way out is to find firstbyte,
+    if (ExtraDebug)
+      fprintf(stderr, "@%td: %s\n",
+              p - bp, DumpState(s).c_str());
+
+    if (have_first_byte && s == start) {
+      // In start state, only way out is to find first_byte,
       // so use optimized assembly in memchr to skip ahead.
-      // If firstbyte isn't found, we can skip to the end
+      // If first_byte isn't found, we can skip to the end
       // of the string.
       if (run_forward) {
-        if ((p = BytePtr(memchr(p, params->firstbyte, ep - p))) == NULL) {
+        if ((p = BytePtr(memchr(p, params->first_byte, ep - p))) == NULL) {
           p = ep;
           break;
         }
       } else {
-        if ((p = BytePtr(memrchr(ep, params->firstbyte, p - ep))) == NULL) {
+        if ((p = BytePtr(memrchr(ep, params->first_byte, p - ep))) == NULL) {
           p = ep;
           break;
         }
@@ -1388,9 +1404,7 @@
     // Okay to use bytemap[] not ByteMap() here, because
     // c is known to be an actual byte and not kByteEndText.
 
-    MaybeReadMemoryBarrier(); // On alpha we need to ensure read ordering
-    State* ns = s->next_[bytemap[c]];
-    ANNOTATE_HAPPENS_AFTER(ns);
+    State* ns = s->next_[bytemap[c]].load(std::memory_order_acquire);
     if (ns == NULL) {
       ns = RunStateOnByteUnlocked(s, c);
       if (ns == NULL) {
@@ -1402,8 +1416,8 @@
         // same at about 2 MB/s.  Unless we're processing an average
         // of 10 bytes per state computation, fail so that RE2 can
         // fall back to the NFA.
-        if (FLAGS_re2_dfa_bail_when_slow && resetp != NULL &&
-            (p - resetp) < 10*state_cache_.size()) {
+        if (dfa_should_bail_when_slow && resetp != NULL &&
+            static_cast<size_t>(p - resetp) < 10*state_cache_.size()) {
           params->failed = true;
           return false;
         }
@@ -1440,8 +1454,8 @@
       params->ep = reinterpret_cast<const char*>(ep);
       return true;
     }
-    s = ns;
 
+    s = ns;
     if (s->IsMatch()) {
       matched = true;
       // The DFA notices the match one byte late,
@@ -1450,11 +1464,17 @@
         lastmatch = p - 1;
       else
         lastmatch = p + 1;
-      if (DebugDFA)
-        fprintf(stderr, "match @%d! [%s]\n",
-                static_cast<int>(lastmatch - bp),
-                DumpState(s).c_str());
-
+      if (ExtraDebug)
+        fprintf(stderr, "match @%td! [%s]\n",
+                lastmatch - bp, DumpState(s).c_str());
+      if (params->matches != NULL && kind_ == Prog::kManyMatch) {
+        for (int i = s->ninst_ - 1; i >= 0; i--) {
+          int id = s->inst_[i];
+          if (id == MatchSep)
+            break;
+          params->matches->insert(id);
+        }
+      }
       if (want_earliest_match) {
         params->ep = reinterpret_cast<const char*>(lastmatch);
         return true;
@@ -1464,6 +1484,9 @@
 
   // Process one more byte to see if it triggers a match.
   // (Remember, matches are delayed one byte.)
+  if (ExtraDebug)
+    fprintf(stderr, "@etx: %s\n", DumpState(s).c_str());
+
   int lastbyte;
   if (run_forward) {
     if (params->text.end() == params->context.end())
@@ -1477,9 +1500,7 @@
       lastbyte = params->text.begin()[-1] & 0xFF;
   }
 
-  MaybeReadMemoryBarrier(); // On alpha we need to ensure read ordering
-  State* ns = s->next_[ByteMap(lastbyte)];
-  ANNOTATE_HAPPENS_AFTER(ns);
+  State* ns = s->next_[ByteMap(lastbyte)].load(std::memory_order_acquire);
   if (ns == NULL) {
     ns = RunStateOnByteUnlocked(s, lastbyte);
     if (ns == NULL) {
@@ -1497,29 +1518,32 @@
       }
     }
   }
-  s = ns;
-  if (DebugDFA)
-    fprintf(stderr, "@_: %s\n", DumpState(s).c_str());
-  if (s == FullMatchState) {
+  if (ns <= SpecialStateMax) {
+    if (ns == DeadState) {
+      params->ep = reinterpret_cast<const char*>(lastmatch);
+      return matched;
+    }
+    // FullMatchState
     params->ep = reinterpret_cast<const char*>(ep);
     return true;
   }
-  if (s > SpecialStateMax && s->IsMatch()) {
+
+  s = ns;
+  if (s->IsMatch()) {
     matched = true;
     lastmatch = p;
-    if (params->matches && kind_ == Prog::kManyMatch) {
-      vector<int>* v = params->matches;
-      v->clear();
-      for (int i = 0; i < s->ninst_; i++) {
-        Prog::Inst* ip = prog_->inst(s->inst_[i]);
-        if (ip->opcode() == kInstMatch)
-          v->push_back(ip->match_id());
+    if (ExtraDebug)
+      fprintf(stderr, "match @etx! [%s]\n", DumpState(s).c_str());
+    if (params->matches != NULL && kind_ == Prog::kManyMatch) {
+      for (int i = s->ninst_ - 1; i >= 0; i--) {
+        int id = s->inst_[i];
+        if (id == MatchSep)
+          break;
+        params->matches->insert(id);
       }
     }
-    if (DebugDFA)
-      fprintf(stderr, "match @%d! [%s]\n", static_cast<int>(lastmatch - bp),
-              DumpState(s).c_str());
   }
+
   params->ep = reinterpret_cast<const char*>(lastmatch);
   return matched;
 }
@@ -1553,7 +1577,7 @@
 // For debugging, calls the general code directly.
 bool DFA::SlowSearchLoop(SearchParams* params) {
   return InlinedSearchLoop(params,
-                           params->firstbyte >= 0,
+                           params->first_byte >= 0,
                            params->want_earliest_match,
                            params->run_forward);
 }
@@ -1574,8 +1598,8 @@
     &DFA::SearchTTT,
   };
 
-  bool have_firstbyte = (params->firstbyte >= 0);
-  int index = 4 * have_firstbyte +
+  bool have_first_byte = params->first_byte >= 0;
+  int index = 4 * have_first_byte +
               2 * params->want_earliest_match +
               1 * params->run_forward;
   return (this->*Searches[index])(params);
@@ -1614,14 +1638,14 @@
 
   // Sanity check: make sure that text lies within context.
   if (text.begin() < context.begin() || text.end() > context.end()) {
-    LOG(DFATAL) << "Text is not inside context.";
+    LOG(DFATAL) << "context does not contain text";
     params->start = DeadState;
     return true;
   }
 
   // Determine correct search type.
   int start;
-  uint flags;
+  uint32_t flags;
   if (params->run_forward) {
     if (text.begin() == context.begin()) {
       start = kStartBeginText;
@@ -1651,7 +1675,7 @@
       flags = 0;
     }
   }
-  if (params->anchored || prog_->anchor_start())
+  if (params->anchored)
     start |= kStartAnchored;
   StartInfo* info = &start_[start];
 
@@ -1667,79 +1691,62 @@
     }
   }
 
-  if (DebugDFA)
-    fprintf(stderr, "anchored=%d fwd=%d flags=%#x state=%s firstbyte=%d\n",
+  if (ExtraDebug)
+    fprintf(stderr, "anchored=%d fwd=%d flags=%#x state=%s first_byte=%d\n",
             params->anchored, params->run_forward, flags,
-            DumpState(info->start).c_str(), info->firstbyte);
+            DumpState(info->start).c_str(), info->first_byte.load());
 
   params->start = info->start;
-  params->firstbyte = ANNOTATE_UNPROTECTED_READ(info->firstbyte);
+  params->first_byte = info->first_byte.load(std::memory_order_acquire);
 
   return true;
 }
 
 // Fills in info if needed.  Returns true on success, false on failure.
 bool DFA::AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
-                              uint flags) {
-  // Quick check; okay because of memory barriers below.
-  if (ANNOTATE_UNPROTECTED_READ(info->firstbyte) != kFbUnknown) {
-    ANNOTATE_HAPPENS_AFTER(&info->firstbyte);
+                              uint32_t flags) {
+  // Quick check.
+  int fb = info->first_byte.load(std::memory_order_acquire);
+  if (fb != kFbUnknown)
     return true;
-  }
 
   MutexLock l(&mutex_);
-  if (info->firstbyte != kFbUnknown) {
-    ANNOTATE_HAPPENS_AFTER(&info->firstbyte);
+  fb = info->first_byte.load(std::memory_order_relaxed);
+  if (fb != kFbUnknown)
     return true;
-  }
 
   q0_->clear();
   AddToQueue(q0_,
              params->anchored ? prog_->start() : prog_->start_unanchored(),
              flags);
-  info->start = WorkqToCachedState(q0_, flags);
+  info->start = WorkqToCachedState(q0_, NULL, flags);
   if (info->start == NULL)
     return false;
 
   if (info->start == DeadState) {
-    ANNOTATE_HAPPENS_BEFORE(&info->firstbyte);
-    WriteMemoryBarrier();  // Synchronize with "quick check" above.
-    info->firstbyte = kFbNone;
+    // Synchronize with "quick check" above.
+    info->first_byte.store(kFbNone, std::memory_order_release);
     return true;
   }
 
   if (info->start == FullMatchState) {
-    ANNOTATE_HAPPENS_BEFORE(&info->firstbyte);
-    WriteMemoryBarrier();  // Synchronize with "quick check" above.
-    info->firstbyte = kFbNone;	// will be ignored
+    // Synchronize with "quick check" above.
+    info->first_byte.store(kFbNone, std::memory_order_release);  // will be ignored
     return true;
   }
 
-  // Compute info->firstbyte by running state on all
-  // possible byte values, looking for a single one that
-  // leads to a different state.
-  int firstbyte = kFbNone;
-  for (int i = 0; i < 256; i++) {
-    State* s = RunStateOnByte(info->start, i);
-    if (s == NULL) {
-      ANNOTATE_HAPPENS_BEFORE(&info->firstbyte);
-      WriteMemoryBarrier();  // Synchronize with "quick check" above.
-      info->firstbyte = firstbyte;
-      return false;
-    }
-    if (s == info->start)
-      continue;
-    // Goes to new state...
-    if (firstbyte == kFbNone) {
-      firstbyte = i;        // ... first one
-    } else {
-      firstbyte = kFbMany;  // ... too many
-      break;
-    }
-  }
-  ANNOTATE_HAPPENS_BEFORE(&info->firstbyte);
-  WriteMemoryBarrier();  // Synchronize with "quick check" above.
-  info->firstbyte = firstbyte;
+  // Even if we have a first_byte, we cannot use it when anchored and,
+  // less obviously, we cannot use it when we are going to need flags.
+  // This trick works only when there is a single byte that leads to a
+  // different state!
+  int first_byte = prog_->first_byte();
+  if (first_byte == -1 ||
+      params->anchored ||
+      info->start->flag_ >> kFlagNeedShift != 0)
+    first_byte = kFbNone;
+
+  // Synchronize with "quick check" above.
+  info->first_byte.store(first_byte, std::memory_order_release);
   return true;
 }
 
@@ -1751,7 +1758,7 @@
                  bool run_forward,
                  bool* failed,
                  const char** epp,
-                 vector<int>* matches) {
+                 SparseSet* matches) {
   *epp = NULL;
   if (!ok()) {
     *failed = true;
@@ -1759,10 +1766,10 @@
   }
   *failed = false;
 
-  if (DebugDFA) {
+  if (ExtraDebug) {
     fprintf(stderr, "\nprogram:\n%s\n", prog_->DumpUnanchored().c_str());
     fprintf(stderr, "text %s anchored=%d earliest=%d fwd=%d kind %d\n",
-            text.as_string().c_str(), anchored, want_earliest_match,
+            string(text).c_str(), anchored, want_earliest_match,
             run_forward, kind_);
   }
 
@@ -1786,7 +1793,7 @@
       *epp = text.end();
     return true;
   }
-  if (DebugDFA)
+  if (ExtraDebug)
     fprintf(stderr, "start %s\n", DumpState(params.start).c_str());
   bool ret = FastSearchLoop(&params);
   if (params.failed) {
@@ -1797,64 +1804,38 @@
   return ret;
 }
 
-// Deletes dfa.
-//
-// This is a separate function so that
-// prog.h can be used without moving the definition of
-// class DFA out of this file.  If you set
-//   prog->dfa_ = dfa;
-// then you also have to set
-//   prog->delete_dfa_ = DeleteDFA;
-// so that ~Prog can delete the dfa.
-static void DeleteDFA(DFA* dfa) {
-  delete dfa;
-}
-
 DFA* Prog::GetDFA(MatchKind kind) {
-  DFA*volatile* pdfa;
-  if (kind == kFirstMatch || kind == kManyMatch) {
-    pdfa = &dfa_first_;
-  } else {
-    kind = kLongestMatch;
-    pdfa = &dfa_longest_;
-  }
-
-  // Quick check; okay because of memory barrier below.
-  DFA *dfa = ANNOTATE_UNPROTECTED_READ(*pdfa);
-  if (dfa != NULL) {
-    ANNOTATE_HAPPENS_AFTER(dfa);
-    return dfa;
-  }
-
-  MutexLock l(&dfa_mutex_);
-  dfa = *pdfa;
-  if (dfa != NULL) {
-    ANNOTATE_HAPPENS_AFTER(dfa);
-    return dfa;
-  }
-
   // For a forward DFA, half the memory goes to each DFA.
+  // However, if it is a "many match" DFA, then there is
+  // no counterpart with which the memory must be shared.
+  //
   // For a reverse DFA, all the memory goes to the
   // "longest match" DFA, because RE2 never does reverse
   // "first match" searches.
-  int64 m = dfa_mem_/2;
-  if (reversed_) {
-    if (kind == kLongestMatch || kind == kManyMatch)
-      m = dfa_mem_;
-    else
-      m = 0;
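+  // Note that kFirstMatch and kManyMatch share dfa_first_ (and its once
+  // flag), so whichever of the two is requested first is the one built.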
+  if (kind == kFirstMatch) {
+    std::call_once(dfa_first_once_, [](Prog* prog) {
+      prog->dfa_first_ = new DFA(prog, kFirstMatch, prog->dfa_mem_ / 2);
+    }, this);
+    return dfa_first_;
+  } else if (kind == kManyMatch) {
+    std::call_once(dfa_first_once_, [](Prog* prog) {
+      prog->dfa_first_ = new DFA(prog, kManyMatch, prog->dfa_mem_);
+    }, this);
+    return dfa_first_;
+  } else {
+    std::call_once(dfa_longest_once_, [](Prog* prog) {
+      if (!prog->reversed_)
+        prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_ / 2);
+      else
+        prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_);
+    }, this);
+    return dfa_longest_;
   }
-  dfa = new DFA(this, kind, m);
-  delete_dfa_ = DeleteDFA;
-
-  // Synchronize with "quick check" above.
-  ANNOTATE_HAPPENS_BEFORE(dfa);
-  WriteMemoryBarrier();
-  *pdfa = dfa;
-
-  return dfa;
 }
 
+void Prog::DeleteDFA(DFA* dfa) {
+  delete dfa;
+}
 
 // Executes the regexp program to search in text,
 // which itself is inside the larger context.  (As a convenience,
@@ -1867,8 +1848,8 @@
 // This is the only external interface (class DFA only exists in this file).
 //
 bool Prog::SearchDFA(const StringPiece& text, const StringPiece& const_context,
-                     Anchor anchor, MatchKind kind,
-                     StringPiece* match0, bool* failed, vector<int>* matches) {
+                     Anchor anchor, MatchKind kind, StringPiece* match0,
+                     bool* failed, SparseSet* matches) {
   *failed = false;
 
   StringPiece context = const_context;
@@ -1877,9 +1858,8 @@
   bool carat = anchor_start();
   bool dollar = anchor_end();
   if (reversed_) {
-    bool t = carat;
-    carat = dollar;
-    dollar = t;
+    using std::swap;
+    swap(carat, dollar);
   }
   if (carat && context.begin() != text.begin())
     return false;
@@ -1891,7 +1871,7 @@
   bool anchored = anchor == kAnchored || anchor_start() || kind == kFullMatch;
   bool endmatch = false;
   if (kind == kManyMatch) {
-    endmatch = true;
+    // This is split out in order to avoid clobbering kind.
   } else if (kind == kFullMatch || anchor_end()) {
     endmatch = true;
     kind = kLongestMatch;
@@ -1899,17 +1879,22 @@
 
   // If the caller doesn't care where the match is (just whether one exists),
   // then we can stop at the very first match we find, the so-called
-  // "shortest match".
-  bool want_shortest_match = false;
-  if (match0 == NULL && !endmatch) {
-    want_shortest_match = true;
+  // "earliest match".
+  bool want_earliest_match = false;
+  if (kind == kManyMatch) {
+    // This is split out in order to avoid clobbering kind.
+    if (matches == NULL) {
+      want_earliest_match = true;
+    }
+  } else if (match0 == NULL && !endmatch) {
+    want_earliest_match = true;
     kind = kLongestMatch;
   }
 
   DFA* dfa = GetDFA(kind);
   const char* ep;
   bool matched = dfa->Search(text, context, anchored,
-                             want_shortest_match, !reversed_,
+                             want_earliest_match, !reversed_,
                              failed, &ep, matches);
   if (*failed)
     return false;
@@ -1923,51 +1908,89 @@
   // as the beginning.
   if (match0) {
     if (reversed_)
-      *match0 = StringPiece(ep, text.end() - ep);
+      *match0 = StringPiece(ep, static_cast<size_t>(text.end() - ep));
     else
-      *match0 = StringPiece(text.begin(), ep - text.begin());
+      *match0 =
+          StringPiece(text.begin(), static_cast<size_t>(ep - text.begin()));
   }
   return true;
 }
 
 // Build out all states in DFA.  Returns number of states.
-int DFA::BuildAllStates() {
+int DFA::BuildAllStates(const Prog::DFAStateCallback& cb) {
   if (!ok())
     return 0;
 
   // Pick out start state for unanchored search
   // at beginning of text.
   RWLocker l(&cache_mutex_);
-  SearchParams params(NULL, NULL, &l);
+  SearchParams params(StringPiece(), StringPiece(), &l);
   params.anchored = false;
-  if (!AnalyzeSearch(&params) || params.start <= SpecialStateMax)
+  if (!AnalyzeSearch(&params) ||
+      params.start == NULL ||
+      params.start == DeadState)
     return 0;
 
   // Add start state to work queue.
-  StateSet queued;
-  vector<State*> q;
-  queued.insert(params.start);
+  // Note that any State* that we handle here must point into the cache,
+  // so we can simply depend on pointer-as-a-number hashing and equality.
+  std::unordered_map<State*, int> m;
+  std::deque<State*> q;
+  m.emplace(params.start, static_cast<int>(m.size()));
   q.push_back(params.start);
 
+  // Compute the input bytes needed to cover all of the next pointers.
+  int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
+  std::vector<int> input(nnext);
+  for (int c = 0; c < 256; c++) {
+    int b = prog_->bytemap()[c];
+    while (c < 256-1 && prog_->bytemap()[c+1] == b)
+      c++;
+    input[b] = c;
+  }
+  input[prog_->bytemap_range()] = kByteEndText;
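+  // For example, if the bytemap maps 0x00-0x60 -> 0, 0x61-0x7A -> 1 and
+  // 0x7B-0xFF -> 2, then input becomes {0x60, 0x7A, 0xFF, kByteEndText}.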
+
+  // Scratch space for the output.
+  std::vector<int> output(nnext);
+
   // Flood to expand every state.
-  for (int i = 0; i < q.size(); i++) {
-    State* s = q[i];
-    for (int c = 0; c < 257; c++) {
+  bool oom = false;
+  while (!q.empty()) {
+    State* s = q.front();
+    q.pop_front();
+    for (int c : input) {
       State* ns = RunStateOnByteUnlocked(s, c);
-      if (ns > SpecialStateMax && queued.find(ns) == queued.end()) {
-        queued.insert(ns);
+      if (ns == NULL) {
+        oom = true;
+        break;
+      }
+      if (ns == DeadState) {
+        output[ByteMap(c)] = -1;
+        continue;
+      }
+      if (m.find(ns) == m.end()) {
+        m.emplace(ns, static_cast<int>(m.size()));
         q.push_back(ns);
       }
+      output[ByteMap(c)] = m[ns];
     }
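+    // Report this state to the callback: its outgoing transitions as
+    // indices into the state numbering (-1 for dead), or NULL if the DFA
+    // ran out of memory, plus whether this is a matching state.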
+    if (cb)
+      cb(oom ? NULL : output.data(),
+         s == FullMatchState || s->IsMatch());
+    if (oom)
+      break;
   }
 
-  return q.size();
+  return static_cast<int>(m.size());
 }
 
 // Build out all states in DFA for kind.  Returns number of states.
-int Prog::BuildEntireDFA(MatchKind kind) {
-  //LOG(ERROR) << "BuildEntireDFA is only for testing.";
-  return GetDFA(kind)->BuildAllStates();
+int Prog::BuildEntireDFA(MatchKind kind, const DFAStateCallback& cb) {
+  return GetDFA(kind)->BuildAllStates(cb);
+}
+
+void Prog::TEST_dfa_should_bail_when_slow(bool b) {
+  dfa_should_bail_when_slow = b;
 }
 
 // Computes min and max for matching string.
@@ -1989,11 +2012,11 @@
   // Also note that previously_visited_states[UnseenStatePtr] will, in the STL
   // tradition, implicitly insert a '0' value at first use. We take advantage
   // of that property below.
-  map<State*, int> previously_visited_states;
+  std::unordered_map<State*, int> previously_visited_states;
 
   // Pick out start state for anchored search at beginning of text.
   RWLocker l(&cache_mutex_);
-  SearchParams params(NULL, NULL, &l);
+  SearchParams params(StringPiece(), StringPiece(), &l);
   params.anchored = true;
   if (!AnalyzeSearch(&params))
     return false;
@@ -2033,16 +2056,14 @@
   // Build minimum prefix.
   State* s = params.start;
   min->clear();
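+  // Hold mutex_ for the whole walk so that RunStateOnByte, which expects
+  // mutex_ to be held, can be called directly.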
+  MutexLock lock(&mutex_);
   for (int i = 0; i < maxlen; i++) {
-    if (previously_visited_states[s] > kMaxEltRepetitions) {
-      VLOG(2) << "Hit kMaxEltRepetitions=" << kMaxEltRepetitions
-        << " for state s=" << s << " and min=" << CEscape(*min);
+    if (previously_visited_states[s] > kMaxEltRepetitions)
       break;
-    }
     previously_visited_states[s]++;
 
     // Stop if min is a match.
-    State* ns = RunStateOnByteUnlocked(s, kByteEndText);
+    State* ns = RunStateOnByte(s, kByteEndText);
     if (ns == NULL)  // DFA out of memory
       return false;
     if (ns != DeadState && (ns == FullMatchState || ns->IsMatch()))
@@ -2051,13 +2072,13 @@
     // Try to extend the string with low bytes.
     bool extended = false;
     for (int j = 0; j < 256; j++) {
-      ns = RunStateOnByteUnlocked(s, j);
+      ns = RunStateOnByte(s, j);
       if (ns == NULL)  // DFA out of memory
         return false;
       if (ns == FullMatchState ||
           (ns > SpecialStateMax && ns->ninst_ > 0)) {
         extended = true;
-        min->append(1, j);
+        min->append(1, static_cast<char>(j));
         s = ns;
         break;
       }
@@ -2071,23 +2092,20 @@
   s = params.start;
   max->clear();
   for (int i = 0; i < maxlen; i++) {
-    if (previously_visited_states[s] > kMaxEltRepetitions) {
-      VLOG(2) << "Hit kMaxEltRepetitions=" << kMaxEltRepetitions
-        << " for state s=" << s << " and max=" << CEscape(*max);
+    if (previously_visited_states[s] > kMaxEltRepetitions)
       break;
-    }
     previously_visited_states[s] += 1;
 
     // Try to extend the string with high bytes.
     bool extended = false;
     for (int j = 255; j >= 0; j--) {
-      State* ns = RunStateOnByteUnlocked(s, j);
+      State* ns = RunStateOnByte(s, j);
       if (ns == NULL)
         return false;
       if (ns == FullMatchState ||
           (ns > SpecialStateMax && ns->ninst_ > 0)) {
         extended = true;
-        max->append(1, j);
+        max->append(1, static_cast<char>(j));
         s = ns;
         break;
       }
@@ -2099,7 +2117,7 @@
   }
 
   // Stopped while still adding to *max - round aaaaaaaaaa... to aaaa...b
-  *max = PrefixSuccessor(*max);
+  PrefixSuccessor(max);
 
   // If there are no bytes left, we have no way to say "there is no maximum
   // string".  We could make the interface more complicated and be able to
@@ -2115,18 +2133,9 @@
 
 // PossibleMatchRange for a Prog.
 bool Prog::PossibleMatchRange(string* min, string* max, int maxlen) {
-  DFA* dfa = NULL;
-  {
-    MutexLock l(&dfa_mutex_);
-    // Have to use dfa_longest_ to get all strings for full matches.
-    // For example, (a|aa) never matches aa in first-match mode.
-    if (dfa_longest_ == NULL) {
-      dfa_longest_ = new DFA(this, Prog::kLongestMatch, dfa_mem_/2);
-      delete_dfa_ = DeleteDFA;
-    }
-    dfa = dfa_longest_;
-  }
-  return dfa->PossibleMatchRange(min, max, maxlen);
+  // Have to use dfa_longest_ to get all strings for full matches.
+  // For example, (a|aa) never matches aa in first-match mode.
+  return GetDFA(kLongestMatch)->PossibleMatchRange(min, max, maxlen);
 }
 
 }  // namespace re2
diff --git a/re2/filtered_re2.cc b/re2/filtered_re2.cc
index f576258..12f638a 100644
--- a/re2/filtered_re2.cc
+++ b/re2/filtered_re2.cc
@@ -2,9 +2,13 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-#include <string>
-#include "util/util.h"
 #include "re2/filtered_re2.h"
+
+#include <stddef.h>
+#include <string>
+
+#include "util/util.h"
+#include "util/logging.h"
 #include "re2/prefilter.h"
 #include "re2/prefilter_tree.h"
 
@@ -15,8 +19,13 @@
       prefilter_tree_(new PrefilterTree()) {
 }
 
+FilteredRE2::FilteredRE2(int min_atom_len)
+    : compiled_(false),
+      prefilter_tree_(new PrefilterTree(min_atom_len)) {
+}
+
 FilteredRE2::~FilteredRE2() {
-  for (int i = 0; i < re2_vec_.size(); i++)
+  for (size_t i = 0; i < re2_vec_.size(); i++)
     delete re2_vec_[i];
   delete prefilter_tree_;
 }
@@ -33,20 +42,25 @@
     }
     delete re;
   } else {
-    *id = re2_vec_.size();
+    *id = static_cast<int>(re2_vec_.size());
     re2_vec_.push_back(re);
   }
 
   return code;
 }
 
-void FilteredRE2::Compile(vector<string>* atoms) {
-  if (compiled_ || re2_vec_.size() == 0) {
-    LOG(INFO) << "C: " << compiled_ << " S:" << re2_vec_.size();
+void FilteredRE2::Compile(std::vector<string>* atoms) {
+  if (compiled_) {
+    LOG(ERROR) << "Compile called already.";
     return;
   }
 
-  for (int i = 0; i < re2_vec_.size(); i++) {
+  if (re2_vec_.empty()) {
+    LOG(ERROR) << "Compile called before Add.";
+    return;
+  }
+
+  for (size_t i = 0; i < re2_vec_.size(); i++) {
     Prefilter* prefilter = Prefilter::FromRE2(re2_vec_[i]);
     prefilter_tree_->Add(prefilter);
   }
@@ -56,21 +70,21 @@
 }
 
 int FilteredRE2::SlowFirstMatch(const StringPiece& text) const {
-  for (int i = 0; i < re2_vec_.size(); i++)
+  for (size_t i = 0; i < re2_vec_.size(); i++)
     if (RE2::PartialMatch(text, *re2_vec_[i]))
-      return i;
+      return static_cast<int>(i);
   return -1;
 }
 
 int FilteredRE2::FirstMatch(const StringPiece& text,
-                            const vector<int>& atoms) const {
+                            const std::vector<int>& atoms) const {
   if (!compiled_) {
-    LOG(DFATAL) << "FirstMatch called before Compile";
+    LOG(DFATAL) << "FirstMatch called before Compile.";
     return -1;
   }
-  vector<int> regexps;
+  std::vector<int> regexps;
   prefilter_tree_->RegexpsGivenStrings(atoms, &regexps);
-  for (int i = 0; i < regexps.size(); i++)
+  for (size_t i = 0; i < regexps.size(); i++)
     if (RE2::PartialMatch(text, *re2_vec_[regexps[i]]))
       return regexps[i];
   return -1;
@@ -78,22 +92,27 @@
 
 bool FilteredRE2::AllMatches(
     const StringPiece& text,
-    const vector<int>& atoms,
-    vector<int>* matching_regexps) const {
+    const std::vector<int>& atoms,
+    std::vector<int>* matching_regexps) const {
   matching_regexps->clear();
-  vector<int> regexps;
+  std::vector<int> regexps;
   prefilter_tree_->RegexpsGivenStrings(atoms, &regexps);
-  for (int i = 0; i < regexps.size(); i++)
+  for (size_t i = 0; i < regexps.size(); i++)
     if (RE2::PartialMatch(text, *re2_vec_[regexps[i]]))
       matching_regexps->push_back(regexps[i]);
   return !matching_regexps->empty();
 }
 
-void FilteredRE2::RegexpsGivenStrings(const vector<int>& matched_atoms,
-                                      vector<int>* passed_regexps) {
-  prefilter_tree_->RegexpsGivenStrings(matched_atoms, passed_regexps);
+void FilteredRE2::AllPotentials(
+    const std::vector<int>& atoms,
+    std::vector<int>* potential_regexps) const {
+  prefilter_tree_->RegexpsGivenStrings(atoms, potential_regexps);
 }
 
+void FilteredRE2::RegexpsGivenStrings(const std::vector<int>& matched_atoms,
+                                      std::vector<int>* passed_regexps) {
+  prefilter_tree_->RegexpsGivenStrings(matched_atoms, passed_regexps);
+}
 
 void FilteredRE2::PrintPrefilter(int regexpid) {
   prefilter_tree_->PrintPrefilter(regexpid);
diff --git a/re2/filtered_re2.h b/re2/filtered_re2.h
index 64b35be..b1317cc 100644
--- a/re2/filtered_re2.h
+++ b/re2/filtered_re2.h
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#ifndef RE2_FILTERED_RE2_H_
+#define RE2_FILTERED_RE2_H_
+
 // The class FilteredRE2 is used as a wrapper to multiple RE2 regexps.
 // It provides a prefilter mechanism that helps in cutting down the
 // number of regexps that need to be actually searched.
@@ -18,20 +21,19 @@
 // indices of strings that were found in the text to get the actual
 // regexp matches.
 
-#ifndef RE2_FILTERED_RE2_H_
-#define RE2_FILTERED_RE2_H_
-
+#include <string>
 #include <vector>
+
 #include "re2/re2.h"
 
 namespace re2 {
-using std::vector;
 
 class PrefilterTree;
 
 class FilteredRE2 {
  public:
   FilteredRE2();
+  explicit FilteredRE2(int min_atom_len);
   ~FilteredRE2();
 
   // Uses RE2 constructor to create a RE2 object (re). Returns
@@ -47,7 +49,7 @@
   // the search text should be lowercased first to find matching
   // strings from the set of strings returned by Compile.  Call after
   // all Add calls are done.
-  void Compile(vector<string>* strings_to_match);
+  void Compile(std::vector<string>* strings_to_match);
 
   // Returns the index of the first matching regexp.
   // Returns -1 on no match. Can be called prior to Compile.
@@ -59,16 +61,24 @@
   // Returns -1 on no match. Compile has to be called before
   // calling this.
   int FirstMatch(const StringPiece& text,
-                 const vector<int>& atoms) const;
+                 const std::vector<int>& atoms) const;
 
   // Returns the indices of all matching regexps, after first clearing
   // matched_regexps.
   bool AllMatches(const StringPiece& text,
-                  const vector<int>& atoms,
-                  vector<int>* matching_regexps) const;
+                  const std::vector<int>& atoms,
+                  std::vector<int>* matching_regexps) const;
+
+  // Returns the indices of all potentially matching regexps after first
+  // clearing potential_regexps.
+  // A regexp is potentially matching if it passes the filter.
+  // If a regexp passes the filter it may still not match.
+  // A regexp that does not pass the filter is guaranteed to not match.
+  void AllPotentials(const std::vector<int>& atoms,
+                     std::vector<int>* potential_regexps) const;
 
   // The number of regexps added.
-  int NumRegexps() const { return re2_vec_.size(); }
+  int NumRegexps() const { return static_cast<int>(re2_vec_.size()); }
 
  private:
 
@@ -79,11 +89,11 @@
   void PrintPrefilter(int regexpid);
 
   // Useful for testing and debugging.
-  void RegexpsGivenStrings(const vector<int>& matched_atoms,
-                           vector<int>* passed_regexps);
+  void RegexpsGivenStrings(const std::vector<int>& matched_atoms,
+                           std::vector<int>* passed_regexps);
 
   // All the regexps in the FilteredRE2.
-  vector<RE2*> re2_vec_;
+  std::vector<RE2*> re2_vec_;
 
   // Has the FilteredRE2 been compiled using Compile()
   bool compiled_;
@@ -91,9 +101,8 @@
   // An AND-OR tree of string atoms used for filtering regexps.
   PrefilterTree* prefilter_tree_;
 
-  //DISALLOW_EVIL_CONSTRUCTORS(FilteredRE2);
-  FilteredRE2(const FilteredRE2&);
-  void operator=(const FilteredRE2&);
+  FilteredRE2(const FilteredRE2&) = delete;
+  FilteredRE2& operator=(const FilteredRE2&) = delete;
 };
 
 }  // namespace re2
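
For orientation, here is a minimal usage sketch of the FilteredRE2 interface declared above. It is a hedged example, not part of this change: it assumes the header shown in this diff, and it follows the Compile() comment's requirement that the search text be lowercased before looking for the returned atoms. A naive substring scan stands in for a real multi-string matcher.

    #include <cctype>
    #include <string>
    #include <vector>
    #include "re2/filtered_re2.h"
    #include "re2/re2.h"

    // Sketch: add patterns, compile to get the atom strings, report which atoms
    // occur in the lowercased text, then ask FilteredRE2 which regexps match.
    std::vector<int> MatchingPatterns(const std::string& text,
                                      const std::vector<std::string>& patterns) {
      re2::FilteredRE2 f;
      RE2::Options opts;
      for (const std::string& p : patterns) {
        int id = -1;
        f.Add(p, opts, &id);
      }
      std::vector<std::string> atoms;
      f.Compile(&atoms);

      // The atoms are lowercase, so search a lowercased copy of the text.
      std::string lowered(text);
      for (char& c : lowered)
        c = static_cast<char>(tolower(static_cast<unsigned char>(c)));

      // A real caller would use Aho-Corasick or similar; a substring scan keeps
      // the sketch self-contained.
      std::vector<int> found_atoms;
      for (size_t i = 0; i < atoms.size(); i++)
        if (lowered.find(atoms[i]) != std::string::npos)
          found_atoms.push_back(static_cast<int>(i));

      std::vector<int> matches;
      f.AllMatches(text, found_atoms, &matches);
      return matches;
    }

AllPotentials() can be substituted for AllMatches() when only the filter verdict is wanted: it returns every regexp that passes the prefilter without running the actual matches.
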
diff --git a/re2/fuzzing/re2_fuzzer.cc b/re2/fuzzing/re2_fuzzer.cc
new file mode 100644
index 0000000..83971a1
--- /dev/null
+++ b/re2/fuzzing/re2_fuzzer.cc
@@ -0,0 +1,169 @@
+// Copyright 2016 The RE2 Authors.  All Rights Reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <map>
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "re2/prefilter.h"
+#include "re2/re2.h"
+
+using re2::StringPiece;
+using std::string;
+
+// NOT static (external linkage keeps the compiler from proving the writes
+// dead and dropping them), NOT signed (the += accumulation below wraps
+// instead of overflowing into undefined behavior).
+uint8_t dummy = 0;
+
+void Test(StringPiece pattern, const RE2::Options& options, StringPiece text) {
+  RE2 re(pattern, options);
+  if (!re.ok())
+    return;
+
+  // Don't waste time fuzzing high-size programs.
+  // They can cause bug reports due to fuzzer timeouts.
+  int size = re.ProgramSize();
+  if (size > 9999)
+    return;
+  int rsize = re.ReverseProgramSize();
+  if (rsize > 9999)
+    return;
+
+  // Don't waste time fuzzing high-fanout programs.
+  // They can cause bug reports due to fuzzer timeouts.
+  std::map<int, int> histogram;
+  int fanout = re.ProgramFanout(&histogram);
+  if (fanout > 9)
+    return;
+  int rfanout = re.ReverseProgramFanout(&histogram);
+  if (rfanout > 9)
+    return;
+
+  // Don't waste time fuzzing programs with large substrings.
+  // They can cause bug reports due to fuzzer timeouts when they
+  // are repetitions (e.g. hundreds of NUL bytes) and matching is
+  // unanchored. And they aren't interesting for fuzzing purposes.
+  std::unique_ptr<re2::Prefilter> prefilter(re2::Prefilter::FromRE2(&re));
+  if (prefilter == nullptr)
+    return;
+  std::queue<re2::Prefilter*> nodes;
+  nodes.push(prefilter.get());
+  while (!nodes.empty()) {
+    re2::Prefilter* node = nodes.front();
+    nodes.pop();
+    if (node->op() == re2::Prefilter::ATOM) {
+      if (node->atom().size() > 9)
+        return;
+    } else if (node->op() == re2::Prefilter::AND ||
+               node->op() == re2::Prefilter::OR) {
+      for (re2::Prefilter* sub : *node->subs())
+        nodes.push(sub);
+    }
+  }
+
+  if (re.NumberOfCapturingGroups() == 0) {
+    // Avoid early return due to too many arguments.
+    StringPiece sp = text;
+    RE2::FullMatch(sp, re);
+    RE2::PartialMatch(sp, re);
+    RE2::Consume(&sp, re);
+    sp = text;  // Reset.
+    RE2::FindAndConsume(&sp, re);
+  } else {
+    // Okay, we have at least one capturing group...
+    // Try conversion for variously typed arguments.
+    StringPiece sp = text;
+    short s;
+    RE2::FullMatch(sp, re, &s);
+    long l;
+    RE2::PartialMatch(sp, re, &l);
+    float f;
+    RE2::Consume(&sp, re, &f);
+    sp = text;  // Reset.
+    double d;
+    RE2::FindAndConsume(&sp, re, &d);
+  }
+
+  string s = string(text);
+  RE2::Replace(&s, re, "");
+  s = string(text);  // Reset.
+  RE2::GlobalReplace(&s, re, "");
+
+  string min, max;
+  re.PossibleMatchRange(&min, &max, /*maxlen=*/9);
+
+  // Exercise some other API functionality.
+  dummy += re.NamedCapturingGroups().size();
+  dummy += re.CapturingGroupNames().size();
+  dummy += RE2::QuoteMeta(pattern).size();
+}
+
+// Entry point for libFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size == 0 || size > 999)
+    return 0;
+
+  // Crudely limit the use of ., \p, \P, \d, \D, \s, \S, \w and \W.
+  // Otherwise, we will waste time on inputs that have long runs of various
+  // character classes. The fuzzer has shown itself to be easily capable of
+  // generating such patterns that fall within the other limits, but result
+  // in timeouts nonetheless. The marginal cost is high - even more so when
+  // counted repetition is involved - whereas the marginal benefit is zero.
+  // TODO(junyer): Handle [:isalnum:] et al. when they start to cause pain.
+  int cc = 0;
+  for (size_t i = 0; i < size; i++) {
+    if (data[i] == '.')
+      cc++;
+    if (data[i] != '\\')
+      continue;
+    i++;
+    if (i >= size)
+      break;
+    if (data[i] == 'p' || data[i] == 'P' ||
+        data[i] == 'd' || data[i] == 'D' ||
+        data[i] == 's' || data[i] == 'S' ||
+        data[i] == 'w' || data[i] == 'W')
+      cc++;
+  }
+  if (cc > 9)
+    return 0;
+
+  // The one-at-a-time hash by Bob Jenkins.
+  uint32_t hash = 0;
+  for (size_t i = 0; i < size; i++) {
+    hash += data[i];
+    hash += (hash << 10);
+    hash ^= (hash >> 6);
+  }
+  hash += (hash << 3);
+  hash ^= (hash >> 11);
+  hash += (hash << 15);
+
+  RE2::Options options;
+  options.set_log_errors(false);
+  options.set_max_mem(64 << 20);
+  options.set_encoding(hash & 1 ? RE2::Options::EncodingLatin1
+                                : RE2::Options::EncodingUTF8);
+  options.set_posix_syntax(hash & 2);
+  options.set_longest_match(hash & 4);
+  options.set_literal(hash & 8);
+  options.set_never_nl(hash & 16);
+  options.set_dot_nl(hash & 32);
+  options.set_never_capture(hash & 64);
+  options.set_case_sensitive(hash & 128);
+  options.set_perl_classes(hash & 256);
+  options.set_word_boundary(hash & 512);
+  options.set_one_line(hash & 1024);
+
+  const char* ptr = reinterpret_cast<const char*>(data);
+  int len = static_cast<int>(size);
+
+  StringPiece pattern(ptr, len);
+  StringPiece text(ptr, len);
+  Test(pattern, options, text);
+
+  return 0;
+}
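
The entry point above is normally driven by libFuzzer. When reproducing a saved crash or timeout input without the fuzzing runtime, a small standalone driver can feed files to the same function; a sketch of such a harness (an assumption, not part of this change):

    #include <stdint.h>
    #include <stdio.h>
    #include <vector>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);

    int main(int argc, char** argv) {
      for (int i = 1; i < argc; i++) {
        FILE* f = fopen(argv[i], "rb");
        if (f == NULL) { perror(argv[i]); return 1; }
        std::vector<uint8_t> buf;
        int c;
        while ((c = fgetc(f)) != EOF)
          buf.push_back(static_cast<uint8_t>(c));
        fclose(f);
        LLVMFuzzerTestOneInput(buf.data(), buf.size());
      }
      return 0;
    }
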
diff --git a/re2/make_perl_groups.pl b/re2/make_perl_groups.pl
index d5eaa59..d9fcdaf 100755
--- a/re2/make_perl_groups.pl
+++ b/re2/make_perl_groups.pl
@@ -32,14 +32,20 @@
 	"\\w",
 );
 
+%overrides = (
+	# Prior to Perl 5.18, \s did not match vertical tab.
+	# RE2 preserves that original behaviour.
+	"\\s:11" => 0,
+);
+
 sub ComputeClass($) {
+  my ($cname) = @_;
   my @ranges;
-  my ($class) = @_;
-  my $regexp = "[$class]";
+  my $regexp = qr/[$cname]/;
   my $start = -1;
   for (my $i=0; $i<=129; $i++) {
     if ($i == 129) { $i = 256; }
-    if ($i <= 128 && chr($i) =~ $regexp) {
+    if ($i <= 128 && ($overrides{"$cname:$i"} // chr($i) =~ $regexp)) {
       if ($start < 0) {
         $start = $i;
       }
@@ -54,15 +60,15 @@
 }
 
 sub PrintClass($$@) {
-  my ($cname, $name, @ranges) = @_;
-  print "static URange16 code${cname}[] = {  /* $name */\n";
+  my ($cnum, $cname, @ranges) = @_;
+  print "static const URange16 code${cnum}[] = {  /* $cname */\n";
   for (my $i=0; $i<@ranges; $i++) {
     my @a = @{$ranges[$i]};
     printf "\t{ 0x%x, 0x%x },\n", $a[0], $a[1];
   }
   print "};\n";
   my $n = @ranges;
-  my $escname = $name;
+  my $escname = $cname;
   $escname =~ s/\\/\\\\/g;
   $negname = $escname;
   if ($negname =~ /:/) {
@@ -70,25 +76,25 @@
   } else {
     $negname =~ y/a-z/A-Z/;
   }
-  return "{ \"$escname\", +1, code$cname, $n }", "{ \"$negname\", -1, code$cname, $n }";
+  return "{ \"$escname\", +1, code$cnum, $n }", "{ \"$negname\", -1, code$cnum, $n }";
 }
 
-my $gen = 0;
+my $cnum = 0;
 
 sub PrintClasses($@) {
-  my ($cname, @classes) = @_;
+  my ($pname, @classes) = @_;
   my @entries;
-  foreach my $cl (@classes) {
-    my @ranges = ComputeClass($cl);
-    push @entries, PrintClass(++$gen, $cl, @ranges);
+  foreach my $cname (@classes) {
+    my @ranges = ComputeClass($cname);
+    push @entries, PrintClass(++$cnum, $cname, @ranges);
   }
-  print "UGroup ${cname}_groups[] = {\n";
+  print "const UGroup ${pname}_groups[] = {\n";
   foreach my $e (@entries) {
     print "\t$e,\n";
   }
   print "};\n";
   my $count = @entries;
-  print "int num_${cname}_groups = $count;\n";
+  print "const int num_${pname}_groups = $count;\n";
 }
 
 print <<EOF;
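
The override above pins \s to the pre-Perl-5.18 definition, so vertical tab (0x0B) stays outside the class in the generated table. A hedged check of that behaviour from the C++ side, assuming the table is regenerated as described:

    #include <cassert>
    #include "re2/re2.h"

    int main() {
      assert(RE2::FullMatch(" ", "\\s"));      // space is in \s
      assert(RE2::FullMatch("\t", "\\s"));     // tab is in \s
      assert(!RE2::FullMatch("\x0b", "\\s"));  // vertical tab is not (pre-5.18 behaviour)
      return 0;
    }
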
diff --git a/re2/make_unicode_casefold.py b/re2/make_unicode_casefold.py
index 3375d2e..d215eb1 100755
--- a/re2/make_unicode_casefold.py
+++ b/re2/make_unicode_casefold.py
@@ -9,7 +9,8 @@
 
 """Generate C++ table for Unicode case folding."""
 
-import unicode, sys
+import sys
+import unicode
 
 _header = """
 // GENERATED BY make_unicode_casefold.py; DO NOT EDIT.
@@ -130,11 +131,11 @@
     foldpairs.sort()
     foldranges = _MakeRanges(foldpairs)
     print "// %d groups, %d pairs, %d ranges" % (len(casegroups), len(foldpairs), len(foldranges))
-    print "CaseFold unicode_%s[] = {" % (name,)
+    print "const CaseFold unicode_%s[] = {" % (name,)
     for lo, hi, delta in foldranges:
       print "\t{ %d, %d, %s }," % (lo, hi, delta)
     print "};"
-    print "int num_unicode_%s = %d;" % (name, len(foldranges),)
+    print "const int num_unicode_%s = %d;" % (name, len(foldranges),)
     print ""
 
   print _header
diff --git a/re2/make_unicode_groups.py b/re2/make_unicode_groups.py
index c2e25c1..e97d47e 100755
--- a/re2/make_unicode_groups.py
+++ b/re2/make_unicode_groups.py
@@ -41,7 +41,7 @@
 
 def PrintRanges(type, name, ranges):
   """Print the ranges as an array of type named name."""
-  print "static %s %s[] = {" % (type, name,)
+  print "static const %s %s[] = {" % (type, name,)
   for lo, hi in ranges:
     print "\t{ %d, %d }," % (lo, hi)
   print "};"
@@ -74,7 +74,7 @@
 
   ugroup = "{ \"%s\", +1" % (name,)
   # if len(code16) > 0:
-  #   PrintCodes("uint16", name+"_code16", code16)
+  #   PrintCodes("uint16_t", name+"_code16", code16)
   #   ugroup += ", %s_code16, %d" % (name, len(code16))
   # else:
   #   ugroup += ", 0, 0"
@@ -99,12 +99,12 @@
   for name, codes in unicode.Scripts().iteritems():
     ugroups.append(PrintGroup(name, codes))
   print "// %d 16-bit ranges, %d 32-bit ranges" % (n16, n32)
-  print "UGroup unicode_groups[] = {";
+  print "const UGroup unicode_groups[] = {";
   ugroups.sort()
   for ug in ugroups:
     print "\t%s," % (ug,)
   print "};"
-  print "int num_unicode_groups = %d;" % (len(ugroups),)
+  print "const int num_unicode_groups = %d;" % (len(ugroups),)
   print _trailer
 
 if __name__ == '__main__':
diff --git a/re2/mimics_pcre.cc b/re2/mimics_pcre.cc
index fc6dd4a..ad197be 100644
--- a/re2/mimics_pcre.cc
+++ b/re2/mimics_pcre.cc
@@ -23,6 +23,7 @@
 // Regexp::MimicsPCRE checks for any of these conditions.
 
 #include "util/util.h"
+#include "util/logging.h"
 #include "re2/regexp.h"
 #include "re2/walker-inl.h"
 
@@ -124,7 +125,8 @@
   }
 
  private:
-  DISALLOW_EVIL_CONSTRUCTORS(EmptyStringWalker);
+  EmptyStringWalker(const EmptyStringWalker&) = delete;
+  EmptyStringWalker& operator=(const EmptyStringWalker&) = delete;
 };
 
 // Called after visiting re's children.  child_args contains the return
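
This is one of several places in the diff where the old DISALLOW_EVIL_CONSTRUCTORS macro is replaced by explicitly deleted members. For reference, the modern idiom in isolation (a generic sketch, not RE2 code):

    class NonCopyable {
     public:
      NonCopyable() = default;
      NonCopyable(const NonCopyable&) = delete;             // no copy construction
      NonCopyable& operator=(const NonCopyable&) = delete;  // no copy assignment
    };
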
diff --git a/re2/nfa.cc b/re2/nfa.cc
index 8c4f761..04d4c6f 100644
--- a/re2/nfa.cc
+++ b/re2/nfa.cc
@@ -24,13 +24,25 @@
 // Like Thompson's original machine and like the DFA implementation, this
 // implementation notices a match only once it is one byte past it.
 
+#include <stdio.h>
+#include <string.h>
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
 #include "re2/prog.h"
 #include "re2/regexp.h"
+#include "util/logging.h"
+#include "util/pod_array.h"
 #include "util/sparse_array.h"
 #include "util/sparse_set.h"
+#include "util/strutil.h"
 
 namespace re2 {
 
+static const bool ExtraDebug = false;
+
 class NFA {
  public:
   NFA(Prog* prog);
@@ -51,12 +63,10 @@
               bool anchored, bool longest,
               StringPiece* submatch, int nsubmatch);
 
-  static const int Debug = 0;
-
  private:
   struct Thread {
     union {
-      int id;
+      int ref;
       Thread* next;  // when on free list
     };
     const char** capture;
@@ -64,16 +74,8 @@
 
   // State for explicit stack in AddToThreadq.
   struct AddState {
-    int id;           // Inst to process
-    int j;
-    const char* cap_j;  // if j>=0, set capture[j] = cap_j before processing ip
-
-    AddState()
-      : id(0), j(-1), cap_j(NULL) {}
-    explicit AddState(int id)
-      : id(id), j(-1), cap_j(NULL) {}
-    AddState(int id, const char* cap_j, int j)
-      : id(id), j(j), cap_j(cap_j) {}
+    int id;     // Inst to process
+    Thread* t;  // if not null, set t0 = t before processing id
   };
 
   // Threadq is a list of threads.  The list is sorted by the order
@@ -82,52 +84,51 @@
   typedef SparseArray<Thread*> Threadq;
 
   inline Thread* AllocThread();
-  inline void FreeThread(Thread*);
+  inline Thread* Incref(Thread* t);
+  inline void Decref(Thread* t);
 
-  // Add id (or its children, following unlabeled arrows)
-  // to the workqueue q with associated capture info.
-  void AddToThreadq(Threadq* q, int id, int flag,
-                    const char* p, const char** capture);
+  // Follows all empty arrows from id0 and enqueues all the states reached.
+  // Enqueues only the ByteRange instructions that match byte c.
+  // context is used (with p) for evaluating empty-width specials.
+  // p is the current input position, and t0 is the current thread.
+  void AddToThreadq(Threadq* q, int id0, int c, const StringPiece& context,
+                    const char* p, Thread* t0);
 
   // Run runq on byte c, appending new states to nextq.
   // Updates matched_ and match_ as new, better matches are found.
-  // p is position of the next byte (the one after c)
-  // in the input string, used when processing capturing parens.
-  // flag is the bitwise or of Bol, Eol, etc., specifying whether
-  // ^, $ and \b match the current input point (after c).
-  inline int Step(Threadq* runq, Threadq* nextq, int c, int flag, const char* p);
+  // context is used (with p) for evaluating empty-width specials.
+  // p is the position of byte c in the input string for AddToThreadq;
+  // p-1 will be used when processing Match instructions.
+  // Frees all the threads on runq.
+  // If there is a shortcut to the end, returns that shortcut.
+  int Step(Threadq* runq, Threadq* nextq, int c, const StringPiece& context,
+           const char* p);
 
   // Returns text version of capture information, for debugging.
   string FormatCapture(const char** capture);
 
   inline void CopyCapture(const char** dst, const char** src);
 
-  // Computes whether all matches must begin with the same first
-  // byte, and if so, returns that byte.  If not, returns -1.
-  int ComputeFirstByte();
+  Prog* prog_;                // underlying program
+  int start_;                 // start instruction in program
+  int ncapture_;              // number of submatches to track
+  bool longest_;              // whether searching for longest match
+  bool endmatch_;             // whether match must end at text.end()
+  const char* btext_;         // beginning of text being matched (for FormatSubmatch)
+  const char* etext_;         // end of text being matched (for endmatch_)
+  Threadq q0_, q1_;           // pre-allocated for Search.
+  PODArray<AddState> stack_;  // pre-allocated for AddToThreadq
+  Thread* free_threads_;      // free list
+  const char** match_;        // best match so far
+  bool matched_;              // any match so far?
 
-  Prog* prog_;          // underlying program
-  int start_;           // start instruction in program
-  int ncapture_;        // number of submatches to track
-  bool longest_;        // whether searching for longest match
-  bool endmatch_;       // whether match must end at text.end()
-  const char* btext_;   // beginning of text being matched (for FormatSubmatch)
-  const char* etext_;   // end of text being matched (for endmatch_)
-  Threadq q0_, q1_;     // pre-allocated for Search.
-  const char** match_;  // best match so far
-  bool matched_;        // any match so far?
-  AddState* astack_;    // pre-allocated for AddToThreadq
-  int nastack_;
-  int first_byte_;      // required first byte for match, or -1 if none
-
-  Thread* free_threads_;  // free list
-
-  DISALLOW_EVIL_CONSTRUCTORS(NFA);
+  NFA(const NFA&) = delete;
+  NFA& operator=(const NFA&) = delete;
 };
 
 NFA::NFA(Prog* prog) {
   prog_ = prog;
-  start_ = prog->start();
+  start_ = prog_->start();
   ncapture_ = 0;
   longest_ = false;
   endmatch_ = false;
@@ -135,17 +136,18 @@
   etext_ = NULL;
   q0_.resize(prog_->size());
   q1_.resize(prog_->size());
-  nastack_ = 2*prog_->size();
-  astack_ = new AddState[nastack_];
+  // See NFA::AddToThreadq() for why this is so.
+  int nstack = 2*prog_->inst_count(kInstCapture) +
+               prog_->inst_count(kInstEmptyWidth) +
+               prog_->inst_count(kInstNop) + 1;  // + 1 for start inst
+  stack_ = PODArray<AddState>(nstack);
+  free_threads_ = NULL;
   match_ = NULL;
   matched_ = false;
-  free_threads_ = NULL;
-  first_byte_ = ComputeFirstByte();
 }
 
 NFA::~NFA() {
   delete[] match_;
-  delete[] astack_;
   Thread* next;
   for (Thread* t = free_threads_; t; t = next) {
     next = t->next;
@@ -154,24 +156,36 @@
   }
 }
 
-void NFA::FreeThread(Thread *t) {
-  if (t == NULL)
-    return;
-  t->next = free_threads_;
-  free_threads_ = t;
-}
-
 NFA::Thread* NFA::AllocThread() {
   Thread* t = free_threads_;
   if (t == NULL) {
     t = new Thread;
+    t->ref = 1;
     t->capture = new const char*[ncapture_];
     return t;
   }
   free_threads_ = t->next;
+  t->ref = 1;
   return t;
 }
 
+NFA::Thread* NFA::Incref(Thread* t) {
+  DCHECK(t != NULL);
+  t->ref++;
+  return t;
+}
+
+void NFA::Decref(Thread* t) {
+  if (t == NULL)
+    return;
+  t->ref--;
+  if (t->ref > 0)
+    return;
+  DCHECK_EQ(t->ref, 0);
+  t->next = free_threads_;
+  free_threads_ = t;
+}
+
 void NFA::CopyCapture(const char** dst, const char** src) {
   for (int i = 0; i < ncapture_; i+=2) {
     dst[i] = src[i];
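
The Incref/Decref pair above replaces FreeThread: queue entries now share a single Thread (and its capture array), and only the release of the last reference returns it to the free list. A self-contained sketch of that pattern, using hypothetical Node/Pool names rather than RE2's types:

    struct Node {
      union {
        int ref;     // reference count while the node is live
        Node* next;  // free-list link while the node is recycled
      };
    };

    struct Pool {
      Node* free_ = nullptr;

      Node* Alloc() {
        Node* n = free_;
        if (n == nullptr)
          n = new Node;
        else
          free_ = n->next;
        n->ref = 1;
        return n;
      }

      Node* Incref(Node* n) { n->ref++; return n; }

      void Decref(Node* n) {
        if (--n->ref > 0)
          return;
        n->next = free_;  // last reference released: recycle the node
        free_ = n;
      }
    };
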
@@ -180,35 +194,43 @@
 }
 
 // Follows all empty arrows from id0 and enqueues all the states reached.
-// The bits in flag (Bol, Eol, etc.) specify whether ^, $ and \b match.
-// The pointer p is the current input position, and m is the
-// current set of match boundaries.
-void NFA::AddToThreadq(Threadq* q, int id0, int flag,
-                       const char* p, const char** capture) {
+// Enqueues only the ByteRange instructions that match byte c.
+// context is used (with p) for evaluating empty-width specials.
+// p is the current input position, and t0 is the current thread.
+void NFA::AddToThreadq(Threadq* q, int id0, int c, const StringPiece& context,
+                       const char* p, Thread* t0) {
   if (id0 == 0)
     return;
 
-  // Astack_ is pre-allocated to avoid resize operations.
-  // It has room for 2*prog_->size() entries, which is enough:
-  // Each inst in prog can be processed at most once,
-  // pushing at most two entries on stk.
-
+  // Use stack_ to hold our stack of instructions yet to process.
+  // It was preallocated as follows:
+  //   two entries per Capture;
+  //   one entry per EmptyWidth; and
+  //   one entry per Nop.
+  // This reflects the maximum number of stack pushes that each can
+  // perform. (Each instruction can be processed at most once.)
+  AddState* stk = stack_.data();
   int nstk = 0;
-  AddState* stk = astack_;
-  stk[nstk++] = AddState(id0);
 
+  stk[nstk++] = {id0, NULL};
   while (nstk > 0) {
-    DCHECK_LE(nstk, nastack_);
-    const AddState& a = stk[--nstk];
-    if (a.j >= 0)
-      capture[a.j] = a.cap_j;
+    DCHECK_LE(nstk, stack_.size());
+    AddState a = stk[--nstk];
+
+  Loop:
+    if (a.t != NULL) {
+      // t0 was a thread that we allocated and copied in order to
+      // record the capture, so we must now decref it.
+      Decref(t0);
+      t0 = a.t;
+    }
 
     int id = a.id;
     if (id == 0)
       continue;
     if (q->has_index(id)) {
-      if (Debug)
-        fprintf(stderr, "  [%d%s]\n", id, FormatCapture(capture).c_str());
+      if (ExtraDebug)
+        fprintf(stderr, "  [%d%s]\n", id, FormatCapture(t0->capture).c_str());
       continue;
     }
 
@@ -216,8 +238,7 @@
     // or we might not.  Even if not, it is necessary to have it,
     // so that we don't revisit id0 during the recursion.
     q->set_new(id, NULL);
-
-    Thread** tp = &q->find(id)->second;
+    Thread** tp = &q->get_existing(id);
     int j;
     Thread* t;
     Prog::Inst* ip = prog_->inst(id);
@@ -231,81 +252,95 @@
 
     case kInstAltMatch:
       // Save state; will pick up at next byte.
-      t = AllocThread();
-      t->id = id;
-      CopyCapture(t->capture, capture);
+      t = Incref(t0);
       *tp = t;
-      // fall through
 
-    case kInstAlt:
-      // Explore alternatives.
-      stk[nstk++] = AddState(ip->out1());
-      stk[nstk++] = AddState(ip->out());
-      break;
+      DCHECK(!ip->last());
+      a = {id+1, NULL};
+      goto Loop;
 
     case kInstNop:
+      if (!ip->last())
+        stk[nstk++] = {id+1, NULL};
+
       // Continue on.
-      stk[nstk++] = AddState(ip->out());
-      break;
+      a = {ip->out(), NULL};
+      goto Loop;
 
     case kInstCapture:
+      if (!ip->last())
+        stk[nstk++] = {id+1, NULL};
+
       if ((j=ip->cap()) < ncapture_) {
-        // Push a dummy whose only job is to restore capture[j]
+        // Push a dummy whose only job is to restore t0
         // once we finish exploring this possibility.
-        stk[nstk++] = AddState(0, capture[j], j);
+        stk[nstk++] = {0, t0};
 
         // Record capture.
-        capture[j] = p;
+        t = AllocThread();
+        CopyCapture(t->capture, t0->capture);
+        t->capture[j] = p;
+        t0 = t;
       }
-      stk[nstk++] = AddState(ip->out());
-      break;
+      a = {ip->out(), NULL};
+      goto Loop;
+
+    case kInstByteRange:
+      if (!ip->Matches(c))
+        goto Next;
+      FALLTHROUGH_INTENDED;
 
     case kInstMatch:
-    case kInstByteRange:
       // Save state; will pick up at next byte.
-      t = AllocThread();
-      t->id = id;
-      CopyCapture(t->capture, capture);
+      t = Incref(t0);
       *tp = t;
-      if (Debug)
-        fprintf(stderr, " + %d%s [%p]\n", id, FormatCapture(t->capture).c_str(), t);
-      break;
+      if (ExtraDebug)
+        fprintf(stderr, " + %d%s\n", id, FormatCapture(t0->capture).c_str());
+
+    Next:
+      if (ip->last())
+        break;
+      a = {id+1, NULL};
+      goto Loop;
 
     case kInstEmptyWidth:
+      if (!ip->last())
+        stk[nstk++] = {id+1, NULL};
+
       // Continue on if we have all the right flag bits.
-      if (ip->empty() & ~flag)
+      if (ip->empty() & ~Prog::EmptyFlags(context, p))
         break;
-      stk[nstk++] = AddState(ip->out());
-      break;
+      a = {ip->out(), NULL};
+      goto Loop;
     }
   }
 }
 
 // Run runq on byte c, appending new states to nextq.
-// Updates match as new, better matches are found.
-// p is position of the byte c in the input string,
-// used when processing capturing parens.
-// flag is the bitwise or of Bol, Eol, etc., specifying whether
-// ^, $ and \b match the current input point (after c).
+// Updates matched_ and match_ as new, better matches are found.
+// context is used (with p) for evaluating empty-width specials.
+// p is the position of byte c in the input string for AddToThreadq;
+// p-1 will be used when processing Match instructions.
 // Frees all the threads on runq.
 // If there is a shortcut to the end, returns that shortcut.
-int NFA::Step(Threadq* runq, Threadq* nextq, int c, int flag, const char* p) {
+int NFA::Step(Threadq* runq, Threadq* nextq, int c, const StringPiece& context,
+              const char* p) {
   nextq->clear();
 
   for (Threadq::iterator i = runq->begin(); i != runq->end(); ++i) {
-    Thread* t = i->second;
+    Thread* t = i->value();
     if (t == NULL)
       continue;
 
     if (longest_) {
       // Can skip any threads started after our current best match.
       if (matched_ && match_[0] < t->capture[0]) {
-        FreeThread(t);
+        Decref(t);
         continue;
       }
     }
 
-    int id = t->id;
+    int id = i->index();
     Prog::Inst* ip = prog_->inst(id);
 
     switch (ip->opcode()) {
@@ -315,8 +350,7 @@
         break;
 
       case kInstByteRange:
-        if (ip->Matches(c))
-          AddToThreadq(nextq, ip->out(), flag, p+1, t->capture);
+        AddToThreadq(nextq, ip->out(), c, context, p, t);
         break;
 
       case kInstAltMatch:
@@ -324,52 +358,58 @@
           break;
         // The match is ours if we want it.
         if (ip->greedy(prog_) || longest_) {
-          CopyCapture((const char**)match_, t->capture);
-          FreeThread(t);
-          for (++i; i != runq->end(); ++i)
-            FreeThread(i->second);
-          runq->clear();
+          CopyCapture(match_, t->capture);
           matched_ = true;
+
+          Decref(t);
+          for (++i; i != runq->end(); ++i)
+            Decref(i->value());
+          runq->clear();
           if (ip->greedy(prog_))
             return ip->out1();
           return ip->out();
         }
         break;
 
-      case kInstMatch:
-        if (endmatch_ && p != etext_)
+      case kInstMatch: {
+        // Avoid invoking undefined behavior when p happens
+        // to be null - and p-1 would be meaningless anyway.
+        if (p == NULL)
           break;
 
-        const char* old = t->capture[1];  // previous end pointer
-        t->capture[1] = p;
+        if (endmatch_ && p-1 != etext_)
+          break;
+
         if (longest_) {
           // Leftmost-longest mode: save this match only if
           // it is either farther to the left or at the same
           // point but longer than an existing match.
           if (!matched_ || t->capture[0] < match_[0] ||
-              (t->capture[0] == match_[0] && t->capture[1] > match_[1]))
-            CopyCapture((const char**)match_, t->capture);
+              (t->capture[0] == match_[0] && p-1 > match_[1])) {
+            CopyCapture(match_, t->capture);
+            match_[1] = p-1;
+            matched_ = true;
+          }
         } else {
           // Leftmost-biased mode: this match is by definition
           // better than what we've already found (see next line).
-          CopyCapture((const char**)match_, t->capture);
+          CopyCapture(match_, t->capture);
+          match_[1] = p-1;
+          matched_ = true;
 
           // Cut off the threads that can only find matches
           // worse than the one we just found: don't run the
           // rest of the current Threadq.
-          t->capture[0] = old;
-          FreeThread(t);
+          Decref(t);
           for (++i; i != runq->end(); ++i)
-            FreeThread(i->second);
+            Decref(i->value());
           runq->clear();
-          matched_ = true;
           return 0;
         }
-        t->capture[0] = old;
-        matched_ = true;
         break;
+      }
     }
-    FreeThread(t);
+    Decref(t);
   }
   runq->clear();
   return 0;
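
Step and AddToThreadq now defer the empty-width bits to Prog::EmptyFlags(context, p) instead of threading a precomputed flag through the search loop. For reference, a sketch of what those bits encode, reconstructed from the inline computation that this change deletes from Search below; treat it as illustrative, not as the exact body of Prog::EmptyFlags:

    #include <stdint.h>
    #include "re2/prog.h"
    #include "re2/stringpiece.h"

    // Hedged sketch: the empty-width flags that hold at position p in context.
    uint32_t EmptyFlagsSketch(const re2::StringPiece& context, const char* p) {
      uint32_t flags = 0;
      if (p == context.begin())
        flags |= re2::kEmptyBeginText | re2::kEmptyBeginLine;  // ^ and \A
      else if (p[-1] == '\n')
        flags |= re2::kEmptyBeginLine;                         // ^ in multi-line mode
      if (p == context.end())
        flags |= re2::kEmptyEndText | re2::kEmptyEndLine;      // $ and \z
      else if (p[0] == '\n')
        flags |= re2::kEmptyEndLine;
      bool wasword = p > context.begin() && re2::Prog::IsWordChar(p[-1] & 0xFF);
      bool isword  = p < context.end()   && re2::Prog::IsWordChar(p[0] & 0xFF);
      flags |= (isword != wasword) ? re2::kEmptyWordBoundary       // \b
                                   : re2::kEmptyNonWordBoundary;   // \B
      return flags;
    }
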
@@ -391,12 +431,6 @@
   return s;
 }
 
-// Returns whether haystack contains needle's memory.
-static bool StringPieceContains(const StringPiece haystack, const StringPiece needle) {
-  return haystack.begin() <= needle.begin() &&
-         haystack.end() >= needle.end();
-}
-
 bool NFA::Search(const StringPiece& text, const StringPiece& const_context,
             bool anchored, bool longest,
             StringPiece* submatch, int nsubmatch) {
@@ -407,12 +441,9 @@
   if (context.begin() == NULL)
     context = text;
 
-  if (!StringPieceContains(context, text)) {
-    LOG(FATAL) << "Bad args: context does not contain text "
-                << reinterpret_cast<const void*>(context.begin())
-                << "+" << context.size() << " "
-                << reinterpret_cast<const void*>(text.begin())
-                << "+" << text.size();
+  // Sanity check: make sure that text lies within context.
+  if (text.begin() < context.begin() || text.end() > context.end()) {
+    LOG(DFATAL) << "context does not contain text";
     return false;
   }
 
@@ -445,16 +476,13 @@
 
   match_ = new const char*[ncapture_];
   matched_ = false;
-  memset(match_, 0, ncapture_*sizeof match_[0]);
 
   // For debugging prints.
   btext_ = context.begin();
 
-  if (Debug) {
+  if (ExtraDebug)
     fprintf(stderr, "NFA::Search %s (context: %s) anchored=%d longest=%d\n",
-            text.as_string().c_str(), context.as_string().c_str(), anchored,
-            longest);
-  }
+            string(text).c_str(), string(context).c_str(), anchored, longest);
 
   // Set up search.
   Threadq* runq = &q0_;
@@ -462,60 +490,32 @@
   runq->clear();
   nextq->clear();
   memset(&match_[0], 0, ncapture_*sizeof match_[0]);
-  const char* bp = context.begin();
-  int c = -1;
-  int wasword = 0;
-
-  if (text.begin() > context.begin()) {
-    c = text.begin()[-1] & 0xFF;
-    wasword = Prog::IsWordChar(c);
-  }
 
   // Loop over the text, stepping the machine.
   for (const char* p = text.begin();; p++) {
-    // Check for empty-width specials.
-    int flag = 0;
+    if (ExtraDebug) {
+      int c = 0;
+      if (p == context.begin())
+        c = '^';
+      else if (p > text.end())
+        c = '$';
+      else if (p < text.end())
+        c = p[0] & 0xFF;
 
-    // ^ and \A
-    if (p == context.begin())
-      flag |= kEmptyBeginText | kEmptyBeginLine;
-    else if (p <= context.end() && p[-1] == '\n')
-      flag |= kEmptyBeginLine;
-
-    // $ and \z
-    if (p == context.end())
-      flag |= kEmptyEndText | kEmptyEndLine;
-    else if (p < context.end() && p[0] == '\n')
-      flag |= kEmptyEndLine;
-
-    // \b and \B
-    int isword = 0;
-    if (p < context.end())
-      isword = Prog::IsWordChar(p[0] & 0xFF);
-
-    if (isword != wasword)
-      flag |= kEmptyWordBoundary;
-    else
-      flag |= kEmptyNonWordBoundary;
-
-    if (Debug) {
-      fprintf(stderr, "%c[%#x/%d/%d]:", p > text.end() ? '$' : p == bp ? '^' : c, flag, isword, wasword);
+      fprintf(stderr, "%c:", c);
       for (Threadq::iterator i = runq->begin(); i != runq->end(); ++i) {
-        Thread* t = i->second;
+        Thread* t = i->value();
         if (t == NULL)
           continue;
-        fprintf(stderr, " %d%s", t->id,
-                FormatCapture((const char**)t->capture).c_str());
+        fprintf(stderr, " %d%s", i->index(), FormatCapture(t->capture).c_str());
       }
       fprintf(stderr, "\n");
     }
 
-    // Process previous character (waited until now to avoid
-    // repeating the flag computation above).
-    // This is a no-op the first time around the loop, because
-    // runq is empty.
-    int id = Step(runq, nextq, c, flag, p-1);
+    // This is a no-op the first time around the loop because runq is empty.
+    int id = Step(runq, nextq, p < text.end() ? p[0] & 0xFF : -1, context, p);
     DCHECK_EQ(runq->size(), 0);
+    using std::swap;
     swap(nextq, runq);
     nextq->clear();
     if (id != 0) {
@@ -529,7 +529,8 @@
             break;
 
           case kInstCapture:
-            match_[ip->cap()] = p;
+            if (ip->cap() < ncapture_)
+              match_[ip->cap()] = p;
             id = ip->out();
             continue;
 
@@ -541,14 +542,6 @@
             match_[1] = p;
             matched_ = true;
             break;
-
-          case kInstEmptyWidth:
-            if (ip->empty() & ~(kEmptyEndLine|kEmptyEndText)) {
-              LOG(DFATAL) << "Unexpected empty-width in short circuit: " << ip->empty();
-              break;
-            }
-            id = ip->out();
-            continue;
         }
         break;
       }
@@ -566,72 +559,56 @@
       // If there's a required first byte for an unanchored search
       // and we're not in the middle of any possible matches,
       // use memchr to search for the byte quickly.
-      if (!anchored && first_byte_ >= 0 && runq->size() == 0 &&
-          p < text.end() && (p[0] & 0xFF) != first_byte_) {
-        p = reinterpret_cast<const char*>(memchr(p, first_byte_,
-                                                 text.end() - p));
+      int fb = prog_->first_byte();
+      if (!anchored && runq->size() == 0 &&
+          fb >= 0 && p < text.end() && (p[0] & 0xFF) != fb) {
+        p = reinterpret_cast<const char*>(memchr(p, fb, text.end() - p));
         if (p == NULL) {
           p = text.end();
-          isword = 0;
-        } else {
-          isword = Prog::IsWordChar(p[0] & 0xFF);
         }
-        flag = Prog::EmptyFlags(context, p);
       }
 
-      // Steal match storage (cleared but unused as of yet)
-      // temporarily to hold match boundaries for new thread.
-      match_[0] = p;
-      AddToThreadq(runq, start_, flag, p, match_);
-      match_[0] = NULL;
+      Thread* t = AllocThread();
+      CopyCapture(t->capture, match_);
+      t->capture[0] = p;
+      AddToThreadq(runq, start_, p < text.end() ? p[0] & 0xFF : -1, context, p,
+                   t);
+      Decref(t);
     }
 
     // If all the threads have died, stop early.
     if (runq->size() == 0) {
-      if (Debug)
+      if (ExtraDebug)
         fprintf(stderr, "dead\n");
       break;
     }
-
-    if (p == text.end())
-      c = 0;
-    else
-      c = *p & 0xFF;
-    wasword = isword;
-
-    // Will run step(runq, nextq, c, ...) on next iteration.  See above.
   }
 
   for (Threadq::iterator i = runq->begin(); i != runq->end(); ++i)
-    FreeThread(i->second);
+    Decref(i->value());
 
   if (matched_) {
     for (int i = 0; i < nsubmatch; i++)
-      submatch[i].set(match_[2*i], match_[2*i+1] - match_[2*i]);
-    if (Debug)
-      fprintf(stderr, "match (%d,%d)\n",
-              static_cast<int>(match_[0] - btext_),
-              static_cast<int>(match_[1] - btext_));
+      submatch[i] =
+          StringPiece(match_[2 * i],
+                      static_cast<size_t>(match_[2 * i + 1] - match_[2 * i]));
+    if (ExtraDebug)
+      fprintf(stderr, "match (%td,%td)\n",
+              match_[0] - btext_, match_[1] - btext_);
     return true;
   }
-  VLOG(1) << "No matches found";
   return false;
 }
 
 // Computes whether all successful matches have a common first byte,
 // and if so, returns that byte.  If not, returns -1.
-int NFA::ComputeFirstByte() {
-  if (start_ == 0)
-    return -1;
-
-  int b = -1;  // first byte, not yet computed
-
-  typedef SparseSet Workq;
-  Workq q(prog_->size());
-  q.insert(start_);
-  for (Workq::iterator it = q.begin(); it != q.end(); ++it) {
+int Prog::ComputeFirstByte() {
+  int b = -1;
+  SparseSet q(size());
+  q.insert(start());
+  for (SparseSet::iterator it = q.begin(); it != q.end(); ++it) {
     int id = *it;
-    Prog::Inst* ip = prog_->inst(id);
+    Prog::Inst* ip = inst(id);
     switch (ip->opcode()) {
       default:
         LOG(DFATAL) << "unhandled " << ip->opcode() << " in ComputeFirstByte";
@@ -642,6 +619,9 @@
         return -1;
 
       case kInstByteRange:
+        if (!ip->last())
+          q.insert(id+1);
+
         // Must match only a single byte
         if (ip->lo() != ip->hi())
           return -1;
@@ -658,6 +638,9 @@
       case kInstNop:
       case kInstCapture:
       case kInstEmptyWidth:
+        if (!ip->last())
+          q.insert(id+1);
+
         // Continue on.
         // Ignore ip->empty() flags for kInstEmptyWidth
         // in order to be as conservative as possible
@@ -666,13 +649,9 @@
           q.insert(ip->out());
         break;
 
-      case kInstAlt:
       case kInstAltMatch:
-        // Explore alternatives.
-        if (ip->out())
-          q.insert(ip->out());
-        if (ip->out1())
-          q.insert(ip->out1());
+        DCHECK(!ip->last());
+        q.insert(id+1);
         break;
 
       case kInstFail:
@@ -686,7 +665,7 @@
 Prog::SearchNFA(const StringPiece& text, const StringPiece& context,
                 Anchor anchor, MatchKind kind,
                 StringPiece* match, int nmatch) {
-  if (NFA::Debug)
+  if (ExtraDebug)
     Dump();
 
   NFA nfa(this);
@@ -705,5 +684,63 @@
   return true;
 }
 
-}  // namespace re2
+// For each instruction i in the program reachable from the start, compute the
+// number of instructions reachable from i by following only empty transitions
+// and record that count as fanout[i].
+//
+// fanout holds the results and is also the work queue for the outer iteration.
+// reachable holds the reached nodes for the inner iteration.
+void Prog::Fanout(SparseArray<int>* fanout) {
+  DCHECK_EQ(fanout->max_size(), size());
+  SparseSet reachable(size());
+  fanout->clear();
+  fanout->set_new(start(), 0);
+  for (SparseArray<int>::iterator i = fanout->begin(); i != fanout->end(); ++i) {
+    int* count = &i->value();
+    reachable.clear();
+    reachable.insert(i->index());
+    for (SparseSet::iterator j = reachable.begin(); j != reachable.end(); ++j) {
+      int id = *j;
+      Prog::Inst* ip = inst(id);
+      switch (ip->opcode()) {
+        default:
+          LOG(DFATAL) << "unhandled " << ip->opcode() << " in Prog::Fanout()";
+          break;
 
+        case kInstByteRange:
+          if (!ip->last())
+            reachable.insert(id+1);
+
+          (*count)++;
+          if (!fanout->has_index(ip->out())) {
+            fanout->set_new(ip->out(), 0);
+          }
+          break;
+
+        case kInstAltMatch:
+          DCHECK(!ip->last());
+          reachable.insert(id+1);
+          break;
+
+        case kInstCapture:
+        case kInstEmptyWidth:
+        case kInstNop:
+          if (!ip->last())
+            reachable.insert(id+1);
+
+          reachable.insert(ip->out());
+          break;
+
+        case kInstMatch:
+          if (!ip->last())
+            reachable.insert(id+1);
+          break;
+
+        case kInstFail:
+          break;
+      }
+    }
+  }
+}
+
+}  // namespace re2
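
Prog now owns first_byte(), and the unanchored search loop above uses it with memchr to skip ahead when no threads are live: if every successful match must start with the same byte (for example a pattern such as "abc", whose matches all begin with 'a'), positions that cannot start a match are skipped in a single libc call. A minimal sketch of the skip itself:

    #include <string.h>

    // Hedged sketch: advance p to the next occurrence of the required first
    // byte fb, or to end if it does not occur again.
    const char* SkipToFirstByte(const char* p, const char* end, int fb) {
      const void* q = memchr(p, fb, end - p);
      return q != NULL ? static_cast<const char*>(q) : end;
    }
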
diff --git a/re2/onepass.cc b/re2/onepass.cc
index 1c49988..7d39290 100644
--- a/re2/onepass.cc
+++ b/re2/onepass.cc
@@ -50,17 +50,30 @@
 // See also Anne Brüggemann-Klein and Derick Wood,
 // "One-unambiguous regular languages", Information and Computation 142(2).
 
+#include <stdint.h>
 #include <string.h>
+#include <algorithm>
 #include <map>
+#include <string>
+#include <vector>
+
 #include "util/util.h"
-#include "util/arena.h"
+#include "util/logging.h"
+#include "util/pod_array.h"
 #include "util/sparse_set.h"
+#include "util/strutil.h"
+#include "util/utf.h"
 #include "re2/prog.h"
 #include "re2/stringpiece.h"
 
+// Silence "zero-sized array in struct/union" warning for OneState::action.
+#ifdef _MSC_VER
+#pragma warning(disable: 4200)
+#endif
+
 namespace re2 {
 
-static const int Debug = 0;
+static const bool ExtraDebug = false;
 
 // The key insight behind this implementation is that the
 // non-determinism in an NFA for a one-pass regular expression
@@ -126,19 +139,16 @@
 // whether a set of conditions required to finish a match at that
 // point in the input rather than process the next byte.
 
-// A state in the one-pass NFA (aka DFA) - just an array of actions.
-struct OneState;
-
 // A state in the one-pass NFA - just an array of actions indexed
 // by the bytemap_[] of the next input byte.  (The bytemap
 // maps next input bytes into equivalence classes, to reduce
 // the memory footprint.)
 struct OneState {
-  uint32 matchcond;   // conditions to match right now.
-  uint32 action[1];
+  uint32_t matchcond;   // conditions to match right now.
+  uint32_t action[];
 };
 
-// The uint32 conditions in the action are a combination of
+// The uint32_t conditions in the action are a combination of
 // condition and capture bits and the next state.  The bottom 16 bits
 // are the condition and capture bits, and the top 16 are the index of
 // the next state.
@@ -155,8 +165,8 @@
 // and kEmptyNonWordBoundary, so we can use that as a sentinel
 // instead of needing an extra bit.
 
-static const int    kIndexShift    = 16;  // number of bits below index
-static const int    kEmptyShift   = 6;  // number of empty flags in prog.h
+static const int    kIndexShift   = 16;  // number of bits below index
+static const int    kEmptyShift   = 6;   // number of empty flags in prog.h
 static const int    kRealCapShift = kEmptyShift + 1;
 static const int    kRealMaxCap   = (kIndexShift - kRealCapShift) / 2 * 2;
 
@@ -164,23 +174,23 @@
 static const int    kCapShift     = kRealCapShift - 2;
 static const int    kMaxCap       = kRealMaxCap + 2;
 
-static const uint32 kMatchWins    = 1 << kEmptyShift;
-static const uint32 kCapMask      = ((1 << kRealMaxCap) - 1) << kRealCapShift;
+static const uint32_t kMatchWins  = 1 << kEmptyShift;
+static const uint32_t kCapMask    = ((1 << kRealMaxCap) - 1) << kRealCapShift;
 
-static const uint32 kImpossible   = kEmptyWordBoundary | kEmptyNonWordBoundary;
+static const uint32_t kImpossible = kEmptyWordBoundary | kEmptyNonWordBoundary;
 
 // Check, at compile time, that prog.h agrees with math above.
 // This function is never called.
 void OnePass_Checks() {
-  COMPILE_ASSERT((1<<kEmptyShift)-1 == kEmptyAllFlags,
-                 kEmptyShift_disagrees_with_kEmptyAllFlags);
+  static_assert((1<<kEmptyShift)-1 == kEmptyAllFlags,
+                "kEmptyShift disagrees with kEmptyAllFlags");
   // kMaxCap counts pointers, kMaxOnePassCapture counts pairs.
-  COMPILE_ASSERT(kMaxCap == Prog::kMaxOnePassCapture*2,
-                 kMaxCap_disagrees_with_kMaxOnePassCapture);
+  static_assert(kMaxCap == Prog::kMaxOnePassCapture*2,
+                "kMaxCap disagrees with kMaxOnePassCapture");
 }
 
-static bool Satisfy(uint32 cond, const StringPiece& context, const char* p) {
-  uint32 satisfied = Prog::EmptyFlags(context, p);
+static bool Satisfy(uint32_t cond, const StringPiece& context, const char* p) {
+  uint32_t satisfied = Prog::EmptyFlags(context, p);
   if (cond & kEmptyAllFlags & ~satisfied)
     return false;
   return true;
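
Given the constants above, each action word packs the next state index in the high 16 bits and the condition/capture bits below it. A hedged sketch of the decoding (the helper names are invented here; only the bit layout comes from the constants in this file and kEmptyAllFlags from prog.h):

    static inline uint32_t NextIndex(uint32_t action)   { return action >> kIndexShift; }
    static inline bool     MatchWins(uint32_t action)   { return (action & kMatchWins) != 0; }
    static inline uint32_t EmptyConds(uint32_t action)  { return action & kEmptyAllFlags; }
    static inline uint32_t CaptureBits(uint32_t action) { return action & kCapMask; }
    // An action whose low bits satisfy (action & kImpossible) == kImpossible
    // marks a transition that was never set and can never be taken.
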
@@ -188,20 +198,17 @@
 
 // Apply the capture bits in cond, saving p to the appropriate
 // locations in cap[].
-static void ApplyCaptures(uint32 cond, const char* p,
+static void ApplyCaptures(uint32_t cond, const char* p,
                           const char** cap, int ncap) {
   for (int i = 2; i < ncap; i++)
     if (cond & (1 << kCapShift << i))
       cap[i] = p;
 }
 
-// Compute a node pointer.
-// Basically (OneState*)(nodes + statesize*nodeindex)
-// but the version with the C++ casts overflows 80 characters (and is ugly).
-static inline OneState* IndexToNode(volatile uint8* nodes, int statesize,
+// Computes the OneState* for the given nodeindex.
+static inline OneState* IndexToNode(uint8_t* nodes, int statesize,
                                     int nodeindex) {
-  return reinterpret_cast<OneState*>(
-    const_cast<uint8*>(nodes + statesize*nodeindex));
+  return reinterpret_cast<OneState*>(nodes + statesize*nodeindex);
 }
 
 bool Prog::SearchOnePass(const StringPiece& text,
@@ -237,30 +244,27 @@
   if (anchor_end())
     kind = kFullMatch;
 
-  // State and act are marked volatile to
-  // keep the compiler from re-ordering the
-  // memory accesses walking over the NFA.
-  // This is worth about 5%.
-  volatile OneState* state = onepass_start_;
-  volatile uint8* nodes = onepass_nodes_;
-  volatile uint32 statesize = onepass_statesize_;
-  uint8* bytemap = bytemap_;
+  uint8_t* nodes = onepass_nodes_;
+  int statesize = sizeof(OneState) + bytemap_range()*sizeof(uint32_t);
+  // start() is always mapped to the zeroth OneState.
+  OneState* state = IndexToNode(nodes, statesize, 0);
+  uint8_t* bytemap = bytemap_;
   const char* bp = text.begin();
   const char* ep = text.end();
   const char* p;
   bool matched = false;
   matchcap[0] = bp;
   cap[0] = bp;
-  uint32 nextmatchcond = state->matchcond;
+  uint32_t nextmatchcond = state->matchcond;
   for (p = bp; p < ep; p++) {
     int c = bytemap[*p & 0xFF];
-    uint32 matchcond = nextmatchcond;
-    uint32 cond = state->action[c];
+    uint32_t matchcond = nextmatchcond;
+    uint32_t cond = state->action[c];
 
     // Determine whether we can reach act->next.
     // If so, advance state and nextmatchcond.
     if ((cond & kEmptyAllFlags) == 0 || Satisfy(cond, context, p)) {
-      uint32 nextindex = cond >> kIndexShift;
+      uint32_t nextindex = cond >> kIndexShift;
       state = IndexToNode(nodes, statesize, nextindex);
       nextmatchcond = state->matchcond;
     } else {
@@ -319,7 +323,7 @@
 
   // Look for match at end of input.
   {
-    uint32 matchcond = state->matchcond;
+    uint32_t matchcond = state->matchcond;
     if (matchcond != kImpossible &&
         ((matchcond & kEmptyAllFlags) == 0 || Satisfy(matchcond, context, p))) {
       if (nmatch > 1 && (matchcond & kCapMask))
@@ -335,7 +339,9 @@
   if (!matched)
     return false;
   for (int i = 0; i < nmatch; i++)
-    match[i].set(matchcap[2*i], matchcap[2*i+1] - matchcap[2*i]);
+    match[i] =
+        StringPiece(matchcap[2 * i],
+                    static_cast<size_t>(matchcap[2 * i + 1] - matchcap[2 * i]));
   return true;
 }
 
@@ -357,7 +363,7 @@
 
 struct InstCond {
   int id;
-  uint32 cond;
+  uint32_t cond;
 };
 
 // Returns whether this is a one-pass program; that is,
@@ -377,7 +383,7 @@
 // Constructs and saves corresponding one-pass NFA on success.
 bool Prog::IsOnePass() {
   if (did_onepass_)
-    return onepass_start_ != NULL;
+    return onepass_nodes_ != NULL;
   did_onepass_ = true;
 
   if (start() == 0)  // no match
@@ -387,32 +393,37 @@
   // Willing to use at most 1/4 of the DFA budget (heuristic).
   // Limit max node count to 65000 as a conservative estimate to
   // avoid overflowing 16-bit node index in encoding.
-  int maxnodes = 2 + byte_inst_count_;
-  int statesize = sizeof(OneState) + (bytemap_range_-1)*sizeof(uint32);
+  int maxnodes = 2 + inst_count(kInstByteRange);
+  int statesize = sizeof(OneState) + bytemap_range()*sizeof(uint32_t);
   if (maxnodes >= 65000 || dfa_mem_ / 4 / statesize < maxnodes)
     return false;
 
   // Flood the graph starting at the start state, and check
   // that in each reachable state, each possible byte leads
   // to a unique next state.
+  int stacksize = inst_count(kInstCapture) +
+                  inst_count(kInstEmptyWidth) +
+                  inst_count(kInstNop) + 1;  // + 1 for start inst
+  PODArray<InstCond> stack(stacksize);
+
   int size = this->size();
-  InstCond *stack = new InstCond[size];
+  PODArray<int> nodebyid(size);  // indexed by ip
+  memset(nodebyid.data(), 0xFF, size*sizeof nodebyid[0]);
 
-  int* nodebyid = new int[size];  // indexed by ip
-  memset(nodebyid, 0xFF, size*sizeof nodebyid[0]);
-
-  uint8* nodes = new uint8[maxnodes*statesize];
-  uint8* nodep = nodes;
+  // Originally, nodes was a uint8_t[maxnodes*statesize], but that was
+  // unnecessarily optimistic: why allocate a large amount of memory
+  // upfront for a large program when it is unlikely to be one-pass?
+  std::vector<uint8_t> nodes;
 
   Instq tovisit(size), workq(size);
   AddQ(&tovisit, start());
   nodebyid[start()] = 0;
-  nodep += statesize;
   int nalloc = 1;
+  nodes.insert(nodes.end(), statesize, 0);
   for (Instq::iterator it = tovisit.begin(); it != tovisit.end(); ++it) {
     int id = *it;
     int nodeindex = nodebyid[id];
-    OneState* node = IndexToNode(nodes, statesize, nodeindex);
+    OneState* node = IndexToNode(nodes.data(), statesize, nodeindex);
 
     // Flood graph using manual stack, filling in actions as found.
     // Default is none.
@@ -427,93 +438,108 @@
     stack[nstack++].cond = 0;
     while (nstack > 0) {
       int id = stack[--nstack].id;
+      uint32_t cond = stack[nstack].cond;
+
+    Loop:
       Prog::Inst* ip = inst(id);
-      uint32 cond = stack[nstack].cond;
       switch (ip->opcode()) {
+        default:
+          LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
+          break;
+
         case kInstAltMatch:
           // TODO(rsc): Ignoring kInstAltMatch optimization.
           // Should implement it in this engine, but it's subtle.
-          // Fall through.
-        case kInstAlt:
+          DCHECK(!ip->last());
           // If already on work queue, (1) is violated: bail out.
-          if (!AddQ(&workq, ip->out()) || !AddQ(&workq, ip->out1()))
+          if (!AddQ(&workq, id+1))
             goto fail;
-          stack[nstack].id = ip->out1();
-          stack[nstack++].cond = cond;
-          stack[nstack].id = ip->out();
-          stack[nstack++].cond = cond;
-          break;
+          id = id+1;
+          goto Loop;
 
         case kInstByteRange: {
           int nextindex = nodebyid[ip->out()];
           if (nextindex == -1) {
             if (nalloc >= maxnodes) {
-              if (Debug)
-                LOG(ERROR)
-                  << StringPrintf("Not OnePass: hit node limit %d > %d",
-                                  nalloc, maxnodes);
+              if (ExtraDebug)
+                LOG(ERROR) << StringPrintf(
+                    "Not OnePass: hit node limit %d >= %d", nalloc, maxnodes);
               goto fail;
             }
             nextindex = nalloc;
-            nodep += statesize;
-            nodebyid[ip->out()] = nextindex;
-            nalloc++;
             AddQ(&tovisit, ip->out());
+            nodebyid[ip->out()] = nalloc;
+            nalloc++;
+            nodes.insert(nodes.end(), statesize, 0);
+            // Update node because it might have been invalidated.
+            node = IndexToNode(nodes.data(), statesize, nodeindex);
           }
-          if (matched)
-            cond |= kMatchWins;
           for (int c = ip->lo(); c <= ip->hi(); c++) {
             int b = bytemap_[c];
-            c = unbytemap_[b];  // last c in byte class
-            uint32 act = node->action[b];
-            uint32 newact = (nextindex << kIndexShift) | cond;
+            // Skip any bytes immediately after c that are also in b.
+            while (c < 256-1 && bytemap_[c+1] == b)
+              c++;
+            uint32_t act = node->action[b];
+            uint32_t newact = (nextindex << kIndexShift) | cond;
+            if (matched)
+              newact |= kMatchWins;
             if ((act & kImpossible) == kImpossible) {
               node->action[b] = newact;
             } else if (act != newact) {
-              if (Debug) {
-                LOG(ERROR)
-                  << StringPrintf("Not OnePass: conflict on byte "
-                                  "%#x at state %d",
-                                  c, *it);
-              }
+              if (ExtraDebug)
+                LOG(ERROR) << StringPrintf(
+                    "Not OnePass: conflict on byte %#x at state %d", c, *it);
               goto fail;
             }
           }
           if (ip->foldcase()) {
-            Rune lo = max<Rune>(ip->lo(), 'a') + 'A' - 'a';
-            Rune hi = min<Rune>(ip->hi(), 'z') + 'A' - 'a';
+            Rune lo = std::max<Rune>(ip->lo(), 'a') + 'A' - 'a';
+            Rune hi = std::min<Rune>(ip->hi(), 'z') + 'A' - 'a';
             for (int c = lo; c <= hi; c++) {
               int b = bytemap_[c];
-              c = unbytemap_[b];  // last c in class
-              uint32 act = node->action[b];
-              uint32 newact = (nextindex << kIndexShift) | cond;
+              // Skip any bytes immediately after c that are also in b.
+              while (c < 256-1 && bytemap_[c+1] == b)
+                c++;
+              uint32_t act = node->action[b];
+              uint32_t newact = (nextindex << kIndexShift) | cond;
+              if (matched)
+                newact |= kMatchWins;
               if ((act & kImpossible) == kImpossible) {
                 node->action[b] = newact;
               } else if (act != newact) {
-                if (Debug) {
-                  LOG(ERROR)
-                    << StringPrintf("Not OnePass: conflict on byte "
-                                    "%#x at state %d",
-                                    c, *it);
-                }
+                if (ExtraDebug)
+                  LOG(ERROR) << StringPrintf(
+                      "Not OnePass: conflict on byte %#x at state %d", c, *it);
                 goto fail;
               }
             }
           }
-          break;
+
+          if (ip->last())
+            break;
+          // If already on work queue, (1) is violated: bail out.
+          if (!AddQ(&workq, id+1))
+            goto fail;
+          id = id+1;
+          goto Loop;
         }
 
         case kInstCapture:
-          if (ip->cap() < kMaxCap)
-            cond |= (1 << kCapShift) << ip->cap();
-          goto QueueEmpty;
-
         case kInstEmptyWidth:
-          cond |= ip->empty();
-          goto QueueEmpty;
-
         case kInstNop:
-        QueueEmpty:
+          if (!ip->last()) {
+            // If already on work queue, (1) is violated: bail out.
+            if (!AddQ(&workq, id+1))
+              goto fail;
+            stack[nstack].id = id+1;
+            stack[nstack++].cond = cond;
+          }
+
+          if (ip->opcode() == kInstCapture && ip->cap() < kMaxCap)
+            cond |= (1 << kCapShift) << ip->cap();
+          if (ip->opcode() == kInstEmptyWidth)
+            cond |= ip->empty();
+
           // kInstCapture and kInstNop always proceed to ip->out().
           // kInstEmptyWidth only sometimes proceeds to ip->out(),
           // but as a conservative approximation we assume it always does.
@@ -522,29 +548,32 @@
 
           // If already on work queue, (1) is violated: bail out.
           if (!AddQ(&workq, ip->out())) {
-            if (Debug) {
-              LOG(ERROR) << StringPrintf("Not OnePass: multiple paths"
-                                         " %d -> %d\n",
-                                         *it, ip->out());
-            }
+            if (ExtraDebug)
+              LOG(ERROR) << StringPrintf(
+                  "Not OnePass: multiple paths %d -> %d\n", *it, ip->out());
             goto fail;
           }
-          stack[nstack].id = ip->out();
-          stack[nstack++].cond = cond;
-          break;
+          id = ip->out();
+          goto Loop;
 
         case kInstMatch:
           if (matched) {
             // (3) is violated
-            if (Debug) {
-              LOG(ERROR) << StringPrintf("Not OnePass: multiple matches"
-                                         " from %d\n", *it);
-            }
+            if (ExtraDebug)
+              LOG(ERROR) << StringPrintf(
+                  "Not OnePass: multiple matches from %d\n", *it);
             goto fail;
           }
           matched = true;
           node->matchcond = cond;
-          break;
+
+          if (ip->last())
+            break;
+          // If already on work queue, (1) is violated: bail out.
+          if (!AddQ(&workq, id+1))
+            goto fail;
+          id = id+1;
+          goto Loop;
 
         case kInstFail:
           break;
@@ -552,29 +581,22 @@
     }
   }
 
-  if (Debug) {  // For debugging, dump one-pass NFA to LOG(ERROR).
-    string dump = "prog dump:\n" + Dump() + "node dump\n";
-    map<int, int> idmap;
+  if (ExtraDebug) {  // For debugging, dump one-pass NFA to LOG(ERROR).
+    LOG(ERROR) << "bytemap:\n" << DumpByteMap();
+    LOG(ERROR) << "prog:\n" << Dump();
+
+    std::map<int, int> idmap;
     for (int i = 0; i < size; i++)
       if (nodebyid[i] != -1)
         idmap[nodebyid[i]] = i;
 
-    StringAppendF(&dump, "byte ranges:\n");
-    int i = 0;
-    for (int b = 0; b < bytemap_range_; b++) {
-      int lo = i;
-      while (bytemap_[i] == b)
-        i++;
-      StringAppendF(&dump, "\t%d: %#x-%#x\n", b, lo, i - 1);
-    }
-
+    string dump;
     for (Instq::iterator it = tovisit.begin(); it != tovisit.end(); ++it) {
       int id = *it;
       int nodeindex = nodebyid[id];
       if (nodeindex == -1)
-      	continue;
-      OneState* node = IndexToNode(nodes, statesize, nodeindex);
-      string s;
+        continue;
+      OneState* node = IndexToNode(nodes.data(), statesize, nodeindex);
       StringAppendF(&dump, "node %d id=%d: matchcond=%#x\n",
                     nodeindex, id, node->matchcond);
       for (int i = 0; i < bytemap_range_; i++) {
@@ -586,28 +608,15 @@
                       idmap[node->action[i] >> kIndexShift]);
       }
     }
-    LOG(ERROR) << dump;
+    LOG(ERROR) << "nodes:\n" << dump;
   }
 
-  // Overallocated earlier; cut down to actual size.
-  nodep = new uint8[nalloc*statesize];
-  memmove(nodep, nodes, nalloc*statesize);
-  delete[] nodes;
-  nodes = nodep;
-
-  onepass_start_ = IndexToNode(nodes, statesize, nodebyid[start()]);
-  onepass_nodes_ = nodes;
-  onepass_statesize_ = statesize;
   dfa_mem_ -= nalloc*statesize;
-
-  delete[] stack;
-  delete[] nodebyid;
+  onepass_nodes_ = new uint8_t[nalloc*statesize];
+  memmove(onepass_nodes_, nodes.data(), nalloc*statesize);
   return true;
 
 fail:
-  delete[] stack;
-  delete[] nodebyid;
-  delete[] nodes;
   return false;
 }
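
As an aside, the conflict check rewritten in the hunks above enforces the core one-pass invariant: for a given state, each byte class may select at most one next action. The following standalone sketch (not RE2 code; kImpossible and the small action table are illustrative stand-ins) shows the same install-or-bail logic in isolation.

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t kImpossible = 0xFFFFFFFF;  // "no action installed yet"
  uint32_t action[4] = {kImpossible, kImpossible, kImpossible, kImpossible};

  // Install an action for byte class b; a second, different action for the
  // same byte class is the "conflict on byte" case and must fail.
  auto install = [&](int b, uint32_t newact) {
    if (action[b] == kImpossible) { action[b] = newact; return true; }
    return action[b] == newact;
  };

  std::printf("first install:  %s\n", install(2, 7) ? "ok" : "conflict");
  std::printf("second install: %s\n", install(2, 9) ? "ok" : "conflict");
  return 0;
}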
 
diff --git a/re2/parse.cc b/re2/parse.cc
index 0cf4ab4..c8dea7e 100644
--- a/re2/parse.cc
+++ b/re2/parse.cc
@@ -16,14 +16,41 @@
 // and recognizes the Perl escape sequences \d, \s, \w, \D, \S, and \W.
 // See regexp.h for rationale.
 
+#include <ctype.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
+
 #include "util/util.h"
+#include "util/logging.h"
+#include "util/pod_array.h"
+#include "util/strutil.h"
+#include "util/utf.h"
 #include "re2/regexp.h"
 #include "re2/stringpiece.h"
 #include "re2/unicode_casefold.h"
 #include "re2/unicode_groups.h"
+#include "re2/walker-inl.h"
+
+#if defined(RE2_USE_ICU)
+#include "unicode/uniset.h"
+#include "unicode/unistr.h"
+#include "unicode/utypes.h"
+#endif
 
 namespace re2 {
 
+// Reduce the maximum repeat count by an order of magnitude when fuzzing.
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+static const int kMaxRepeat = 100;
+#else
+static const int kMaxRepeat = 1000;
+#endif
+
 // Regular expression parse state.
 // The list of parsed regexps so far is maintained as a vector of
 // Regexp pointers called the stack.  Left parenthesis and vertical
@@ -156,7 +183,8 @@
   int ncap_;  // number of capturing parens seen
   int rune_max_;  // maximum char value for this encoding
 
-  DISALLOW_EVIL_CONSTRUCTORS(ParseState);
+  ParseState(const ParseState&) = delete;
+  ParseState& operator=(const ParseState&) = delete;
 };
 
 // Pseudo-operators - only on parse stack.
@@ -214,7 +242,8 @@
   // single characters (e.g., [.] instead of \.), and some
   // analysis does better with fewer character classes.
   // Similarly, [Aa] can be rewritten as a literal A with ASCII case folding.
-  if (re->op_ == kRegexpCharClass) {
+  if (re->op_ == kRegexpCharClass && re->ccb_ != NULL) {
+    re->ccb_->RemoveAbove(rune_max_);
     if (re->ccb_->size() == 1) {
       Rune r = re->ccb_->begin()->lo;
       re->Decref();
@@ -240,8 +269,8 @@
 // Searches the case folding tables and returns the CaseFold* that contains r.
 // If there isn't one, returns the CaseFold* with smallest f->lo bigger than r.
 // If there isn't one, returns NULL.
-CaseFold* LookupCaseFold(CaseFold *f, int n, Rune r) {
-  CaseFold* ef = f + n;
+const CaseFold* LookupCaseFold(const CaseFold *f, int n, Rune r) {
+  const CaseFold* ef = f + n;
 
   // Binary search for entry containing r.
   while (n > 0) {
@@ -268,7 +297,7 @@
 }
 
 // Returns the result of applying the fold f to the rune r.
-Rune ApplyFold(CaseFold *f, Rune r) {
+Rune ApplyFold(const CaseFold *f, Rune r) {
   switch (f->delta) {
     default:
       return r + f->delta;
@@ -276,7 +305,7 @@
     case EvenOddSkip:  // even <-> odd but only applies to every other
       if ((r - f->lo) % 2)
         return r;
-      // fall through
+      FALLTHROUGH_INTENDED;
     case EvenOdd:  // even <-> odd
       if (r%2 == 0)
         return r + 1;
@@ -285,7 +314,7 @@
     case OddEvenSkip:  // odd <-> even but only applies to every other
       if ((r - f->lo) % 2)
         return r;
-      // fall through
+      FALLTHROUGH_INTENDED;
     case OddEven:  // odd <-> even
       if (r%2 == 1)
         return r + 1;
@@ -304,7 +333,7 @@
 //
 //   CycleFoldRune('?') = '?'
 Rune CycleFoldRune(Rune r) {
-  CaseFold* f = LookupCaseFold(unicode_casefold, num_unicode_casefold, r);
+  const CaseFold* f = LookupCaseFold(unicode_casefold, num_unicode_casefold, r);
   if (f == NULL || r < f->lo)
     return r;
   return ApplyFold(f, r);
@@ -327,7 +356,7 @@
     return;
 
   while (lo <= hi) {
-    CaseFold* f = LookupCaseFold(unicode_casefold, num_unicode_casefold, lo);
+    const CaseFold* f = LookupCaseFold(unicode_casefold, num_unicode_casefold, lo);
     if (f == NULL)  // lo has no fold, nor does anything above lo
       break;
     if (lo < f->lo) {  // lo has no fold; next rune with a fold is f->lo
@@ -338,7 +367,7 @@
     // Add in the result of folding the range lo - f->hi
     // and that range's fold, recursively.
     Rune lo1 = lo;
-    Rune hi1 = min<Rune>(hi, f->hi);
+    Rune hi1 = std::min<Rune>(hi, f->hi);
     switch (f->delta) {
       default:
         lo1 += f->delta;
@@ -377,7 +406,6 @@
       }
       r = CycleFoldRune(r);
     } while (r != r1);
-    re->ccb_->RemoveAbove(rune_max_);
     return PushRegexp(re);
   }
 
@@ -454,6 +482,23 @@
   Regexp::ParseFlags fl = flags_;
   if (nongreedy)
     fl = fl ^ NonGreedy;
+
+  // Squash **, ++ and ??. Regexp::Star() et al. handle this too, but
+  // they're mostly for use during simplification, not during parsing.
+  if (op == stacktop_->op() && fl == stacktop_->parse_flags())
+    return true;
+
+  // Squash *+, *?, +*, +?, ?* and ?+. They all squash to *, so because
+  // op is a repeat, we just have to check that stacktop_->op() is too,
+  // then adjust stacktop_.
+  if ((stacktop_->op() == kRegexpStar ||
+       stacktop_->op() == kRegexpPlus ||
+       stacktop_->op() == kRegexpQuest) &&
+      fl == stacktop_->parse_flags()) {
+    stacktop_->op_ = kRegexpStar;
+    return true;
+  }
+
   Regexp* re = new Regexp(op, fl);
   re->AllocSub(1);
   re->down_ = stacktop_->down_;
@@ -463,12 +508,66 @@
   return true;
 }
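
The two "squash" comments above describe collapsing stacked repetition operators before building a new node. Below is a minimal sketch of that rule only, assuming nothing about the rest of the parser (re2 still rejects some stacked operators as errors elsewhere).

#include <cstdio>
#include <string>

// Collapse consecutive repeat operators: any of *, + or ? stacked on
// another repeat squashes to '*'.
static std::string SquashRepeats(const std::string& ops) {
  std::string out;
  for (char c : ops) {
    bool is_rep = (c == '*' || c == '+' || c == '?');
    bool prev_rep = !out.empty() &&
        (out.back() == '*' || out.back() == '+' || out.back() == '?');
    if (is_rep && prev_rep) {
      out.back() = '*';
      continue;
    }
    out.push_back(c);
  }
  return out;
}

int main() {
  std::printf("%s\n", SquashRepeats("a**").c_str());     // a*
  std::printf("%s\n", SquashRepeats("a+?b?*").c_str());  // a*b*
  return 0;
}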
 
+// RepetitionWalker reports whether the repetition regexp is valid.
+// Valid means that the combination of the top-level repetition
+// and any inner repetitions does not exceed n copies of the
+// innermost thing.
+// This rewalks the regexp tree and is called for every repetition,
+// so we have to worry about inducing quadratic behavior in the parser.
+// We avoid this by only using RepetitionWalker when min or max >= 2.
+// In that case the depth of any >= 2 nesting can only get to 9 without
+// triggering a parse error, so each subtree can only be rewalked 9 times.
+class RepetitionWalker : public Regexp::Walker<int> {
+ public:
+  RepetitionWalker() {}
+  virtual int PreVisit(Regexp* re, int parent_arg, bool* stop);
+  virtual int PostVisit(Regexp* re, int parent_arg, int pre_arg,
+                        int* child_args, int nchild_args);
+  virtual int ShortVisit(Regexp* re, int parent_arg);
+
+ private:
+  RepetitionWalker(const RepetitionWalker&) = delete;
+  RepetitionWalker& operator=(const RepetitionWalker&) = delete;
+};
+
+int RepetitionWalker::PreVisit(Regexp* re, int parent_arg, bool* stop) {
+  int arg = parent_arg;
+  if (re->op() == kRegexpRepeat) {
+    int m = re->max();
+    if (m < 0) {
+      m = re->min();
+    }
+    if (m > 0) {
+      arg /= m;
+    }
+  }
+  return arg;
+}
+
+int RepetitionWalker::PostVisit(Regexp* re, int parent_arg, int pre_arg,
+                                int* child_args, int nchild_args) {
+  int arg = pre_arg;
+  for (int i = 0; i < nchild_args; i++) {
+    if (child_args[i] < arg) {
+      arg = child_args[i];
+    }
+  }
+  return arg;
+}
+
+int RepetitionWalker::ShortVisit(Regexp* re, int parent_arg) {
+  // This should never be called, since we use Walk and not
+  // WalkExponential.
+  LOG(DFATAL) << "RepetitionWalker::ShortVisit called";
+  return 0;
+}
+
 // Pushes a repetition regexp onto the stack.
 // A valid argument for the operator must already be on the stack.
 bool Regexp::ParseState::PushRepetition(int min, int max,
                                         const StringPiece& s,
                                         bool nongreedy) {
-  if ((max != -1 && max < min) || min > 1000 || max > 1000) {
+  if ((max != -1 && max < min) || min > kMaxRepeat || max > kMaxRepeat) {
     status_->set_code(kRegexpRepeatSize);
     status_->set_error_arg(s);
     return false;
@@ -488,8 +587,15 @@
   re->down_ = stacktop_->down_;
   re->sub()[0] = FinishRegexp(stacktop_);
   re->simple_ = re->ComputeSimple();
-
   stacktop_ = re;
+  if (min >= 2 || max >= 2) {
+    RepetitionWalker w;
+    if (w.Walk(stacktop_, kMaxRepeat) == 0) {
+      status_->set_code(kRegexpRepeatSize);
+      status_->set_error_arg(s);
+      return false;
+    }
+  }
   return true;
 }
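
RepetitionWalker, as described in the comment above, checks that nested repeats do not multiply out to more than kMaxRepeat copies of the innermost subexpression. Here is a rough sketch of the budget arithmetic along a single chain of nested repeats (the real walker takes the minimum over all children, not just one chain).

#include <cstdio>
#include <vector>

// Divide the budget by each repeat count from the outermost repeat inward;
// a budget of zero means the expansion would exceed the limit.
static bool WithinBudget(const std::vector<int>& nested_counts, int budget) {
  for (int m : nested_counts) {
    if (m > 0)
      budget /= m;
    if (budget == 0)
      return false;
  }
  return true;
}

int main() {
  std::printf("%d\n", WithinBudget({5, 100}, 1000));    // 1: 500 copies is fine
  std::printf("%d\n", WithinBudget({100, 100}, 1000));  // 0: 10000 copies is rejected
  return 0;
}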
 
@@ -504,7 +610,7 @@
   Regexp* re = new Regexp(kLeftParen, flags_);
   re->cap_ = ++ncap_;
   if (name.data() != NULL)
-    re->name_ = new string(name.as_string());
+    re->name_ = new string(name);
   return PushRegexp(re);
 }
 
@@ -515,13 +621,6 @@
   return PushRegexp(re);
 }
 
-// Adds r to cc, along with r's upper case if foldascii is set.
-static void AddLiteral(CharClassBuilder* cc, Rune r, bool foldascii) {
-  cc->AddRange(r, r);
-  if (foldascii && 'a' <= r && r <= 'z')
-    cc->AddRange(r + 'A' - 'a', r + 'A' - 'a');
-}
-
 // Processes a vertical bar in the input.
 bool Regexp::ParseState::DoVerticalBar() {
   MaybeConcatString(-1, NoParseFlags);
@@ -535,46 +634,34 @@
   Regexp* r1;
   Regexp* r2;
   if ((r1 = stacktop_) != NULL &&
-      (r2 = stacktop_->down_) != NULL &&
+      (r2 = r1->down_) != NULL &&
       r2->op() == kVerticalBar) {
-    // If above and below vertical bar are literal or char class,
-    // can merge into a single char class.
     Regexp* r3;
-    if ((r1->op() == kRegexpLiteral ||
-         r1->op() == kRegexpCharClass ||
-         r1->op() == kRegexpAnyChar) &&
-        (r3 = r2->down_) != NULL) {
-      Rune rune;
-      switch (r3->op()) {
-        case kRegexpLiteral:  // convert to char class
-          rune = r3->rune_;
-          r3->op_ = kRegexpCharClass;
-          r3->cc_ = NULL;
-          r3->ccb_ = new CharClassBuilder;
-          AddLiteral(r3->ccb_, rune, r3->parse_flags_ & Regexp::FoldCase);
-          // fall through
-        case kRegexpCharClass:
-          if (r1->op() == kRegexpLiteral)
-            AddLiteral(r3->ccb_, r1->rune_,
-                       r1->parse_flags_ & Regexp::FoldCase);
-          else if (r1->op() == kRegexpCharClass)
-            r3->ccb_->AddCharClass(r1->ccb_);
-          if (r1->op() == kRegexpAnyChar || r3->ccb_->full()) {
-            delete r3->ccb_;
-            r3->ccb_ = NULL;
-            r3->op_ = kRegexpAnyChar;
-          }
-          // fall through
-        case kRegexpAnyChar:
-          // pop r1
-          stacktop_ = r2;
-          r1->Decref();
-          return true;
-        default:
-          break;
+    if ((r3 = r2->down_) != NULL &&
+        (r1->op() == kRegexpAnyChar || r3->op() == kRegexpAnyChar)) {
+      // AnyChar is above or below the vertical bar. Let it subsume
+      // the other when the other is Literal, CharClass or AnyChar.
+      if (r3->op() == kRegexpAnyChar &&
+          (r1->op() == kRegexpLiteral ||
+           r1->op() == kRegexpCharClass ||
+           r1->op() == kRegexpAnyChar)) {
+        // Discard r1.
+        stacktop_ = r2;
+        r1->Decref();
+        return true;
+      }
+      if (r1->op() == kRegexpAnyChar &&
+          (r3->op() == kRegexpLiteral ||
+           r3->op() == kRegexpCharClass ||
+           r3->op() == kRegexpAnyChar)) {
+        // Rearrange the stack and discard r3.
+        r1->down_ = r3->down_;
+        r2->down_ = r1;
+        stacktop_ = r2;
+        r3->Decref();
+        return true;
       }
     }
-
     // Swap r1 below vertical bar (r2).
     r1->down_ = r2->down_;
     r2->down_ = r1;
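
The rewritten DoVerticalBar above lets an any-char branch subsume a literal or character-class branch on the other side of the vertical bar. A much-simplified, order-insensitive sketch of that idea (not the stack manipulation itself):

#include <cstdio>
#include <vector>

enum Op { kLiteral, kCharClass, kAnyChar };

int main() {
  std::vector<Op> alt = {kLiteral, kAnyChar, kCharClass};
  bool has_anychar = false;
  for (Op op : alt)
    has_anychar |= (op == kAnyChar);
  if (has_anychar)
    alt.assign(1, kAnyChar);  // '.' makes the narrower branches redundant
  std::printf("%zu branch(es) remain\n", alt.size());  // 1
  return 0;
}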
@@ -780,59 +867,180 @@
   }
 }
 
+// In the context of factoring alternations, a Splice is: a factored prefix or
+// merged character class computed by one iteration of one round of factoring;
+// the span of subexpressions of the alternation to be "spliced" (i.e. removed
+// and replaced); and, for a factored prefix, the number of suffixes after any
+// factoring that might have subsequently been performed on them. For a merged
+// character class, there are no suffixes, of course, so the field is ignored.
+struct Splice {
+  Splice(Regexp* prefix, Regexp** sub, int nsub)
+      : prefix(prefix),
+        sub(sub),
+        nsub(nsub),
+        nsuffix(-1) {}
+
+  Regexp* prefix;
+  Regexp** sub;
+  int nsub;
+  int nsuffix;
+};
+
+// Named so because it is used to implement an explicit stack, a Frame is: the
+// span of subexpressions of the alternation to be factored; the current round
+// of factoring; any Splices computed; and, for a factored prefix, an iterator
+// to the next Splice to be factored (i.e. in another Frame) because of its suffixes.
+struct Frame {
+  Frame(Regexp** sub, int nsub)
+      : sub(sub),
+        nsub(nsub),
+        round(0) {}
+
+  Regexp** sub;
+  int nsub;
+  int round;
+  std::vector<Splice> splices;
+  int spliceidx;
+};
+
+// Bundled into a class for friend access to Regexp without needing to declare
+// (or define) Splice in regexp.h.
+class FactorAlternationImpl {
+ public:
+  static void Round1(Regexp** sub, int nsub,
+                     Regexp::ParseFlags flags,
+                     std::vector<Splice>* splices);
+  static void Round2(Regexp** sub, int nsub,
+                     Regexp::ParseFlags flags,
+                     std::vector<Splice>* splices);
+  static void Round3(Regexp** sub, int nsub,
+                     Regexp::ParseFlags flags,
+                     std::vector<Splice>* splices);
+};
+
 // Factors common prefixes from alternation.
 // For example,
 //     ABC|ABD|AEF|BCX|BCY
 // simplifies to
 //     A(B(C|D)|EF)|BC(X|Y)
-// which the normal parse state routines will further simplify to
+// and thence to
 //     A(B[CD]|EF)|BC[XY]
 //
 // Rewrites sub to contain simplified list to alternate and returns
 // the new length of sub.  Adjusts reference counts accordingly
 // (incoming sub[i] decremented, outgoing sub[i] incremented).
+int Regexp::FactorAlternation(Regexp** sub, int nsub, ParseFlags flags) {
+  std::vector<Frame> stk;
+  stk.emplace_back(sub, nsub);
 
-// It's too much of a pain to write this code with an explicit stack,
-// so instead we let the caller specify a maximum depth and
-// don't simplify beyond that.  There are around 15 words of local
-// variables and parameters in the frame, so allowing 8 levels
-// on a 64-bit machine is still less than a kilobyte of stack and
-// probably enough benefit for practical uses.
-const int kFactorAlternationMaxDepth = 8;
+  for (;;) {
+    auto& sub = stk.back().sub;
+    auto& nsub = stk.back().nsub;
+    auto& round = stk.back().round;
+    auto& splices = stk.back().splices;
+    auto& spliceidx = stk.back().spliceidx;
 
-int Regexp::FactorAlternation(
-    Regexp** sub, int n,
-    Regexp::ParseFlags altflags) {
-  return FactorAlternationRecursive(sub, n, altflags,
-                                    kFactorAlternationMaxDepth);
+    if (splices.empty()) {
+      // Advance to the next round of factoring. Note that this covers
+      // the initialised state: when splices is empty and round is 0.
+      round++;
+    } else if (spliceidx < static_cast<int>(splices.size())) {
+      // We have at least one more Splice to factor. Recurse logically.
+      stk.emplace_back(splices[spliceidx].sub, splices[spliceidx].nsub);
+      continue;
+    } else {
+      // We have no more Splices to factor. Apply them.
+      auto iter = splices.begin();
+      int out = 0;
+      for (int i = 0; i < nsub; ) {
+        // Copy until we reach where the next Splice begins.
+        while (sub + i < iter->sub)
+          sub[out++] = sub[i++];
+        switch (round) {
+          case 1:
+          case 2: {
+            // Assemble the Splice prefix and the suffixes.
+            Regexp* re[2];
+            re[0] = iter->prefix;
+            re[1] = Regexp::AlternateNoFactor(iter->sub, iter->nsuffix, flags);
+            sub[out++] = Regexp::Concat(re, 2, flags);
+            i += iter->nsub;
+            break;
+          }
+          case 3:
+            // Just use the Splice prefix.
+            sub[out++] = iter->prefix;
+            i += iter->nsub;
+            break;
+          default:
+            LOG(DFATAL) << "unknown round: " << round;
+            break;
+        }
+        // If we are done, copy until the end of sub.
+        if (++iter == splices.end()) {
+          while (i < nsub)
+            sub[out++] = sub[i++];
+        }
+      }
+      splices.clear();
+      nsub = out;
+      // Advance to the next round of factoring.
+      round++;
+    }
+
+    switch (round) {
+      case 1:
+        FactorAlternationImpl::Round1(sub, nsub, flags, &splices);
+        break;
+      case 2:
+        FactorAlternationImpl::Round2(sub, nsub, flags, &splices);
+        break;
+      case 3:
+        FactorAlternationImpl::Round3(sub, nsub, flags, &splices);
+        break;
+      case 4:
+        if (stk.size() == 1) {
+          // We are at the top of the stack. Just return.
+          return nsub;
+        } else {
+          // Pop the stack and set the number of suffixes.
+          // (Note that references will be invalidated!)
+          int nsuffix = nsub;
+          stk.pop_back();
+          stk.back().splices[stk.back().spliceidx].nsuffix = nsuffix;
+          ++stk.back().spliceidx;
+          continue;
+        }
+      default:
+        LOG(DFATAL) << "unknown round: " << round;
+        break;
+    }
+
+    // Set spliceidx depending on whether we have Splices to factor.
+    if (splices.empty() || round == 3) {
+      spliceidx = static_cast<int>(splices.size());
+    } else {
+      spliceidx = 0;
+    }
+  }
 }
 
-int Regexp::FactorAlternationRecursive(
-    Regexp** sub, int n,
-    Regexp::ParseFlags altflags,
-    int maxdepth) {
-
-  if (maxdepth <= 0)
-    return n;
-
+void FactorAlternationImpl::Round1(Regexp** sub, int nsub,
+                                   Regexp::ParseFlags flags,
+                                   std::vector<Splice>* splices) {
   // Round 1: Factor out common literal prefixes.
-  Rune *rune = NULL;
+  int start = 0;
+  Rune* rune = NULL;
   int nrune = 0;
   Regexp::ParseFlags runeflags = Regexp::NoParseFlags;
-  int start = 0;
-  int out = 0;
-  for (int i = 0; i <= n; i++) {
-    // Invariant: what was in sub[0:start] has been Decref'ed
-    // and that space has been reused for sub[0:out] (out <= start).
-    //
-    // Invariant: sub[start:i] consists of regexps that all begin
-    // with the string rune[0:nrune].
-
+  for (int i = 0; i <= nsub; i++) {
+    // Invariant: sub[start:i] consists of regexps that all
+    // begin with rune[0:nrune].
     Rune* rune_i = NULL;
     int nrune_i = 0;
     Regexp::ParseFlags runeflags_i = Regexp::NoParseFlags;
-    if (i < n) {
-      rune_i = LeadingString(sub[i], &nrune_i, &runeflags_i);
+    if (i < nsub) {
+      rune_i = Regexp::LeadingString(sub[i], &nrune_i, &runeflags_i);
       if (runeflags_i == runeflags) {
         int same = 0;
         while (same < nrune && same < nrune_i && rune[same] == rune_i[same])
@@ -846,109 +1054,121 @@
     }
 
     // Found end of a run with common leading literal string:
-    // sub[start:i] all begin with rune[0:nrune] but sub[i]
-    // does not even begin with rune[0].
-    //
-    // Factor out common string and append factored expression to sub[0:out].
+    // sub[start:i] all begin with rune[0:nrune],
+    // but sub[i] does not even begin with rune[0].
     if (i == start) {
       // Nothing to do - first iteration.
     } else if (i == start+1) {
       // Just one: don't bother factoring.
-      sub[out++] = sub[start];
     } else {
-      // Construct factored form: prefix(suffix1|suffix2|...)
-      Regexp* x[2];  // x[0] = prefix, x[1] = suffix1|suffix2|...
-      x[0] = LiteralString(rune, nrune, runeflags);
+      Regexp* prefix = Regexp::LiteralString(rune, nrune, runeflags);
       for (int j = start; j < i; j++)
-        RemoveLeadingString(sub[j], nrune);
-      int nn = FactorAlternationRecursive(sub + start, i - start, altflags,
-                                          maxdepth - 1);
-      x[1] = AlternateNoFactor(sub + start, nn, altflags);
-      sub[out++] = Concat(x, 2, altflags);
+        Regexp::RemoveLeadingString(sub[j], nrune);
+      splices->emplace_back(prefix, sub + start, i - start);
     }
 
-    // Prepare for next round (if there is one).
-    if (i < n) {
+    // Prepare for next iteration (if there is one).
+    if (i < nsub) {
       start = i;
       rune = rune_i;
       nrune = nrune_i;
       runeflags = runeflags_i;
     }
   }
-  n = out;
+}
 
-  // Round 2: Factor out common complex prefixes,
-  // just the first piece of each concatenation,
-  // whatever it is.  This is good enough a lot of the time.
-  start = 0;
-  out = 0;
+void FactorAlternationImpl::Round2(Regexp** sub, int nsub,
+                                   Regexp::ParseFlags flags,
+                                   std::vector<Splice>* splices) {
+  // Round 2: Factor out common simple prefixes,
+  // just the first piece of each concatenation.
+  // This will be good enough a lot of the time.
+  //
+  // Complex subexpressions (e.g. involving quantifiers)
+  // are not safe to factor because that collapses their
+  // distinct paths through the automaton, which affects
+  // correctness in some cases.
+  int start = 0;
   Regexp* first = NULL;
-  for (int i = 0; i <= n; i++) {
-    // Invariant: what was in sub[0:start] has been Decref'ed
-    // and that space has been reused for sub[0:out] (out <= start).
-    //
-    // Invariant: sub[start:i] consists of regexps that all begin with first.
-
+  for (int i = 0; i <= nsub; i++) {
+    // Invariant: sub[start:i] consists of regexps that all
+    // begin with first.
     Regexp* first_i = NULL;
-    if (i < n) {
-      first_i = LeadingRegexp(sub[i]);
-      if (first != NULL && Regexp::Equal(first, first_i)) {
+    if (i < nsub) {
+      first_i = Regexp::LeadingRegexp(sub[i]);
+      if (first != NULL &&
+          // first must be an empty-width op
+          // OR a char class, any char or any byte
+          // OR a fixed repeat of a literal, char class, any char or any byte.
+          (first->op() == kRegexpBeginLine ||
+           first->op() == kRegexpEndLine ||
+           first->op() == kRegexpWordBoundary ||
+           first->op() == kRegexpNoWordBoundary ||
+           first->op() == kRegexpBeginText ||
+           first->op() == kRegexpEndText ||
+           first->op() == kRegexpCharClass ||
+           first->op() == kRegexpAnyChar ||
+           first->op() == kRegexpAnyByte ||
+           (first->op() == kRegexpRepeat &&
+            first->min() == first->max() &&
+            (first->sub()[0]->op() == kRegexpLiteral ||
+             first->sub()[0]->op() == kRegexpCharClass ||
+             first->sub()[0]->op() == kRegexpAnyChar ||
+             first->sub()[0]->op() == kRegexpAnyByte))) &&
+          Regexp::Equal(first, first_i))
         continue;
-      }
     }
 
     // Found end of a run with common leading regexp:
-    // sub[start:i] all begin with first but sub[i] does not.
-    //
-    // Factor out common regexp and append factored expression to sub[0:out].
+    // sub[start:i] all begin with first,
+    // but sub[i] does not.
     if (i == start) {
       // Nothing to do - first iteration.
     } else if (i == start+1) {
       // Just one: don't bother factoring.
-      sub[out++] = sub[start];
     } else {
-      // Construct factored form: prefix(suffix1|suffix2|...)
-      Regexp* x[2];  // x[0] = prefix, x[1] = suffix1|suffix2|...
-      x[0] = first->Incref();
+      Regexp* prefix = first->Incref();
       for (int j = start; j < i; j++)
-        sub[j] = RemoveLeadingRegexp(sub[j]);
-      int nn = FactorAlternationRecursive(sub + start, i - start, altflags,
-                                   maxdepth - 1);
-      x[1] = AlternateNoFactor(sub + start, nn, altflags);
-      sub[out++] = Concat(x, 2, altflags);
+        sub[j] = Regexp::RemoveLeadingRegexp(sub[j]);
+      splices->emplace_back(prefix, sub + start, i - start);
     }
 
-    // Prepare for next round (if there is one).
-    if (i < n) {
+    // Prepare for next iteration (if there is one).
+    if (i < nsub) {
       start = i;
       first = first_i;
     }
   }
-  n = out;
+}
 
-  // Round 3: Collapse runs of single literals into character classes.
-  start = 0;
-  out = 0;
-  for (int i = 0; i <= n; i++) {
-    // Invariant: what was in sub[0:start] has been Decref'ed
-    // and that space has been reused for sub[0:out] (out <= start).
-    //
-    // Invariant: sub[start:i] consists of regexps that are either
-    // literal runes or character classes.
+void FactorAlternationImpl::Round3(Regexp** sub, int nsub,
+                                   Regexp::ParseFlags flags,
+                                   std::vector<Splice>* splices) {
+  // Round 3: Merge runs of literals and/or character classes.
+  int start = 0;
+  Regexp* first = NULL;
+  for (int i = 0; i <= nsub; i++) {
+    // Invariant: sub[start:i] consists of regexps that all
+    // are either literals (i.e. runes) or character classes.
+    Regexp* first_i = NULL;
+    if (i < nsub) {
+      first_i = sub[i];
+      if (first != NULL &&
+          (first->op() == kRegexpLiteral ||
+           first->op() == kRegexpCharClass) &&
+          (first_i->op() == kRegexpLiteral ||
+           first_i->op() == kRegexpCharClass))
+        continue;
+    }
 
-    if (i < n &&
-        (sub[i]->op() == kRegexpLiteral ||
-         sub[i]->op() == kRegexpCharClass))
-      continue;
-
-    // sub[i] is not a char or char class;
-    // emit char class for sub[start:i]...
+    // Found end of a run of Literal/CharClass:
+    // sub[start:i] all are either one or the other,
+    // but sub[i] is not.
     if (i == start) {
-      // Nothing to do.
+      // Nothing to do - first iteration.
     } else if (i == start+1) {
-      sub[out++] = sub[start];
+      // Just one: don't bother factoring.
     } else {
-      // Make new char class.
       CharClassBuilder ccb;
       for (int j = start; j < i; j++) {
         Regexp* re = sub[j];
@@ -964,31 +1184,16 @@
         }
         re->Decref();
       }
-      sub[out++] = NewCharClass(ccb.GetCharClass(), altflags);
+      Regexp* re = Regexp::NewCharClass(ccb.GetCharClass(), flags);
+      splices->emplace_back(re, sub + start, i - start);
     }
 
-    // ... and then emit sub[i].
-    if (i < n)
-      sub[out++] = sub[i];
-    start = i+1;
-  }
-  n = out;
-
-  // Round 4: Collapse runs of empty matches into single empty match.
-  start = 0;
-  out = 0;
-  for (int i = 0; i < n; i++) {
-    if (i + 1 < n &&
-        sub[i]->op() == kRegexpEmptyMatch &&
-        sub[i+1]->op() == kRegexpEmptyMatch) {
-      sub[i]->Decref();
-      continue;
+    // Prepare for next iteration (if there is one).
+    if (i < nsub) {
+      start = i;
+      first = first_i;
     }
-    sub[out++] = sub[i];
   }
-  n = out;
-
-  return n;
 }
 
 // Collapse the regexps on top of the stack, down to the
@@ -1013,7 +1218,7 @@
     return;
 
   // Construct op (alternation or concatenation), flattening op of op.
-  Regexp** subs = new Regexp*[n];
+  PODArray<Regexp*> subs(n);
   next = NULL;
   int i = n;
   for (sub = stacktop_; sub != NULL && !IsMarker(sub->op()); sub = next) {
@@ -1028,8 +1233,7 @@
     }
   }
 
-  Regexp* re = ConcatOrAlternate(op, subs, n, flags_, true);
-  delete[] subs;
+  Regexp* re = ConcatOrAlternate(op, subs.data(), n, flags_, true);
   re->simple_ = re->ComputeSimple();
   re->down_ = next;
   stacktop_ = re;
@@ -1105,7 +1309,7 @@
   if (r >= 0) {
     re1->op_ = kRegexpLiteral;
     re1->rune_ = r;
-    re1->parse_flags_ = flags;
+    re1->parse_flags_ = static_cast<uint16_t>(flags);
     return true;
   }
 
@@ -1116,9 +1320,8 @@
 
 // Lexing routines.
 
-// Parses a decimal integer, storing it in *n.
+// Parses a decimal integer, storing it in *np.
 // Sets *s to span the remainder of the string.
-// Sets *out_re to the regexp for the class.
 static bool ParseInteger(StringPiece* s, int* np) {
   if (s->size() == 0 || !isdigit((*s)[0] & 0xFF))
     return false;
@@ -1185,9 +1388,18 @@
 // Argument order is backwards from usual Google style
 // but consistent with chartorune.
 static int StringPieceToRune(Rune *r, StringPiece *sp, RegexpStatus* status) {
-  int n;
-  if (fullrune(sp->data(), sp->size())) {
-    n = chartorune(r, sp->data());
+  // fullrune() takes int, not size_t. However, it just looks
+  // at the leading byte and treats any length >= 4 the same.
+  if (fullrune(sp->data(), static_cast<int>(std::min(size_t{4}, sp->size())))) {
+    int n = chartorune(r, sp->data());
+    // Some copies of chartorune have a bug that accepts
+    // encodings of values in (10FFFF, 1FFFFF] as valid.
+    // Those values break the character class algorithm,
+    // which assumes Runemax is the largest rune.
+    if (*r > Runemax) {
+      n = 1;
+      *r = Runeerror;
+    }
     if (!(n == 1 && *r == Runeerror)) {  // no decoding error
       sp->remove_prefix(n);
       return n;
@@ -1195,7 +1407,7 @@
   }
 
   status->set_code(kRegexpBadUTF8);
-  status->set_error_arg(NULL);
+  status->set_error_arg(StringPiece());
   return -1;
 }
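
The new guard in StringPieceToRune above clamps decodes of values beyond Runemax to a one-byte Runeerror. A tiny sketch of just that clamp, with a hard-coded rune standing in for a buggy decoder's output:

#include <cstdio>

int main() {
  const int Runemax = 0x10FFFF;
  const int Runeerror = 0xFFFD;
  int r = 0x110000;  // pretend a buggy chartorune() produced this
  int n = 4;         // and claimed it consumed four bytes
  if (r > Runemax) {
    n = 1;
    r = Runeerror;
  }
  std::printf("rune=%#x consumed=%d\n", r, n);  // rune=0xfffd consumed=1
  return 0;
}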
 
@@ -1239,12 +1451,12 @@
   if (s->size() < 1 || (*s)[0] != '\\') {
     // Should not happen - caller always checks.
     status->set_code(kRegexpInternalError);
-    status->set_error_arg(NULL);
+    status->set_error_arg(StringPiece());
     return false;
   }
   if (s->size() < 2) {
     status->set_code(kRegexpTrailingBackslash);
-    status->set_error_arg(NULL);
+    status->set_error_arg(StringPiece());
     return false;
   }
   Rune c, c1;
@@ -1275,7 +1487,7 @@
       // Single non-zero octal digit is a backreference; not supported.
       if (s->size() == 0 || (*s)[0] < '0' || (*s)[0] > '7')
         goto BadEscape;
-      // fall through
+      FALLTHROUGH_INTENDED;
     case '0':
       // consume up to three octal digits; already have one.
       code = c - '0';
@@ -1290,6 +1502,8 @@
           }
         }
       }
+      if (code > rune_max)
+        goto BadEscape;
       *rp = code;
       return true;
 
@@ -1375,7 +1589,8 @@
 BadEscape:
   // Unrecognized escape sequence.
   status->set_code(kRegexpBadEscape);
-  status->set_error_arg(StringPiece(begin, s->data() - begin));
+  status->set_error_arg(
+      StringPiece(begin, static_cast<size_t>(s->begin() - begin)));
   return false;
 }
 
@@ -1403,8 +1618,8 @@
 }
 
 // Look for a group with the given name.
-static UGroup* LookupGroup(const StringPiece& name,
-                           UGroup *groups, int ngroups) {
+static const UGroup* LookupGroup(const StringPiece& name,
+                                 const UGroup *groups, int ngroups) {
   // Simple name lookup.
   for (int i = 0; i < ngroups; i++)
     if (StringPiece(groups[i].name) == name)
@@ -1412,30 +1627,32 @@
   return NULL;
 }
 
+// Look for a POSIX group with the given name (e.g., "[:^alpha:]")
+static const UGroup* LookupPosixGroup(const StringPiece& name) {
+  return LookupGroup(name, posix_groups, num_posix_groups);
+}
+
+static const UGroup* LookupPerlGroup(const StringPiece& name) {
+  return LookupGroup(name, perl_groups, num_perl_groups);
+}
+
+#if !defined(RE2_USE_ICU)
 // Fake UGroup containing all Runes
 static URange16 any16[] = { { 0, 65535 } };
 static URange32 any32[] = { { 65536, Runemax } };
 static UGroup anygroup = { "Any", +1, any16, 1, any32, 1 };
 
-// Look for a POSIX group with the given name (e.g., "[:^alpha:]")
-static UGroup* LookupPosixGroup(const StringPiece& name) {
-  return LookupGroup(name, posix_groups, num_posix_groups);
-}
-
-static UGroup* LookupPerlGroup(const StringPiece& name) {
-  return LookupGroup(name, perl_groups, num_perl_groups);
-}
-
 // Look for a Unicode group with the given name (e.g., "Han")
-static UGroup* LookupUnicodeGroup(const StringPiece& name) {
+static const UGroup* LookupUnicodeGroup(const StringPiece& name) {
   // Special case: "Any" means any.
   if (name == StringPiece("Any"))
     return &anygroup;
   return LookupGroup(name, unicode_groups, num_unicode_groups);
 }
+#endif
 
 // Add a UGroup or its negation to the character class.
-static void AddUGroup(CharClassBuilder *cc, UGroup *g, int sign,
+static void AddUGroup(CharClassBuilder *cc, const UGroup *g, int sign,
                       Regexp::ParseFlags parse_flags) {
   if (sign == +1) {
     for (int i = 0; i < g->nr16; i++) {
@@ -1486,7 +1703,7 @@
 // On success, sets *s to span the remainder of the string
 // and returns the corresponding UGroup.
 // The StringPiece must *NOT* be edited unless the call succeeds.
-UGroup* MaybeParsePerlCCEscape(StringPiece* s, Regexp::ParseFlags parse_flags) {
+const UGroup* MaybeParsePerlCCEscape(StringPiece* s, Regexp::ParseFlags parse_flags) {
   if (!(parse_flags & Regexp::PerlClasses))
     return NULL;
   if (s->size() < 2 || (*s)[0] != '\\')
@@ -1494,7 +1711,7 @@
   // Could use StringPieceToRune, but there aren't
   // any non-ASCII Perl group names.
   StringPiece name(s->begin(), 2);
-  UGroup *g = LookupPerlGroup(name);
+  const UGroup *g = LookupPerlGroup(name);
   if (g == NULL)
     return NULL;
   s->remove_prefix(name.size());
@@ -1524,7 +1741,7 @@
   // Committed to parse.  Results:
   int sign = +1;  // -1 = negated char class
   if (c == 'P')
-    sign = -1;
+    sign = -sign;
   StringPiece seq = *s;  // \p{Han} or \pL
   StringPiece name;  // Han or L
   s->remove_prefix(2);  // '\\', 'p'
@@ -1534,11 +1751,11 @@
   if (c != '{') {
     // Name is the bit of string we just skipped over for c.
     const char* p = seq.begin() + 2;
-    name = StringPiece(p, s->begin() - p);
+    name = StringPiece(p, static_cast<size_t>(s->begin() - p));
   } else {
     // Name is in braces. Look for closing }
-    int end = s->find('}', 0);
-    if (end == s->npos) {
+    size_t end = s->find('}', 0);
+    if (end == StringPiece::npos) {
       if (!IsValidUTF8(seq, status))
         return kParseError;
       status->set_code(kRegexpBadCharRange);
@@ -1552,14 +1769,16 @@
   }
 
   // Chop seq where s now begins.
-  seq = StringPiece(seq.begin(), s->begin() - seq.begin());
+  seq = StringPiece(seq.begin(), static_cast<size_t>(s->begin() - seq.begin()));
 
-  // Look up group
   if (name.size() > 0 && name[0] == '^') {
     sign = -sign;
     name.remove_prefix(1);  // '^'
   }
-  UGroup *g = LookupUnicodeGroup(name);
+
+#if !defined(RE2_USE_ICU)
+  // Look up the group in the RE2 Unicode data.
+  const UGroup *g = LookupUnicodeGroup(name);
   if (g == NULL) {
     status->set_code(kRegexpBadCharRange);
     status->set_error_arg(seq);
@@ -1567,6 +1786,31 @@
   }
 
   AddUGroup(cc, g, sign, parse_flags);
+#else
+  // Look up the group in the ICU Unicode data. Because ICU provides full
+  // Unicode properties support, this could be more than a lookup by name.
+  ::icu::UnicodeString ustr = ::icu::UnicodeString::fromUTF8(
+      string("\\p{") + string(name) + string("}"));
+  UErrorCode uerr = U_ZERO_ERROR;
+  ::icu::UnicodeSet uset(ustr, uerr);
+  if (U_FAILURE(uerr)) {
+    status->set_code(kRegexpBadCharRange);
+    status->set_error_arg(seq);
+    return kParseError;
+  }
+
+  // Convert the UnicodeSet to a URange32 and UGroup that we can add.
+  int nr = uset.getRangeCount();
+  URange32* r = new URange32[nr];
+  for (int i = 0; i < nr; i++) {
+    r[i].lo = uset.getRangeStart(i);
+    r[i].hi = uset.getRangeEnd(i);
+  }
+  UGroup g = {"", +1, 0, 0, r, nr};
+  AddUGroup(cc, &g, sign, parse_flags);
+  delete[] r;
+#endif
+
   return kParseOk;
 }
 
@@ -1593,9 +1837,9 @@
 
   // Got it.  Check that it's valid.
   q += 2;
-  StringPiece name(p, q-p);
+  StringPiece name(p, static_cast<size_t>(q - p));
 
-  UGroup *g = LookupPosixGroup(name);
+  const UGroup *g = LookupPosixGroup(name);
   if (g == NULL) {
     status->set_code(kRegexpBadCharRange);
     status->set_error_arg(name);
@@ -1647,7 +1891,8 @@
       return false;
     if (rr->hi < rr->lo) {
       status->set_code(kRegexpBadCharRange);
-      status->set_error_arg(StringPiece(os.data(), s->data() - os.data()));
+      status->set_error_arg(
+          StringPiece(os.data(), static_cast<size_t>(s->data() - os.data())));
       return false;
     }
   } else {
@@ -1666,7 +1911,7 @@
   if (s->size() == 0 || (*s)[0] != '[') {
     // Caller checked this.
     status->set_code(kRegexpInternalError);
-    status->set_error_arg(NULL);
+    status->set_error_arg(StringPiece());
     return false;
   }
   bool negated = false;
@@ -1732,7 +1977,7 @@
     }
 
     // Look for Perl character class symbols (extension).
-    UGroup *g = MaybeParsePerlCCEscape(s, flags_);
+    const UGroup *g = MaybeParsePerlCCEscape(s, flags_);
     if (g != NULL) {
       AddUGroup(re->ccb_, g, g->sign, flags_);
       continue;
@@ -1761,7 +2006,6 @@
 
   if (negated)
     re->ccb_->Negate();
-  re->ccb_->RemoveAbove(rune_max_);
 
   *out_re = re;
   return true;
@@ -1774,7 +2018,7 @@
 static bool IsValidCaptureName(const StringPiece& name) {
   if (name.size() == 0)
     return false;
-  for (int i = 0; i < name.size(); i++) {
+  for (size_t i = 0; i < name.size(); i++) {
     int c = name[i];
     if (('0' <= c && c <= '9') ||
         ('a' <= c && c <= 'z') ||
@@ -1820,8 +2064,8 @@
   // so that's the one we implement.  One is enough.
   if (t.size() > 2 && t[0] == 'P' && t[1] == '<') {
     // Pull out name.
-    int end = t.find('>', 2);
-    if (end == t.npos) {
+    size_t end = t.find('>', 2);
+    if (end == StringPiece::npos) {
       if (!IsValidUTF8(*s, status_))
         return false;
       status_->set_code(kRegexpBadNamedCapture);
@@ -1845,7 +2089,7 @@
       return false;
     }
 
-    s->remove_prefix(capture.end() - s->begin());
+    s->remove_prefix(static_cast<size_t>(capture.end() - s->begin()));
     return true;
   }
 
@@ -1928,7 +2172,8 @@
 
 BadPerlOp:
   status_->set_code(kRegexpBadPerlOp);
-  status_->set_error_arg(StringPiece(s->begin(), t.begin() - s->begin()));
+  status_->set_error_arg(
+      StringPiece(s->begin(), static_cast<size_t>(t.begin() - s->begin())));
   return false;
 }
 
@@ -1940,7 +2185,7 @@
   char buf[UTFmax];
 
   utf->clear();
-  for (int i = 0; i < latin1.size(); i++) {
+  for (size_t i = 0; i < latin1.size(); i++) {
     Rune r = latin1[i] & 0xFF;
     int n = runetochar(buf, &r);
     utf->append(buf, n);
@@ -1981,9 +2226,9 @@
     return ps.DoFinish();
   }
 
-  StringPiece lastunary = NULL;
+  StringPiece lastunary = StringPiece();
   while (t.size() > 0) {
-    StringPiece isunary = NULL;
+    StringPiece isunary = StringPiece();
     switch (t[0]) {
       default: {
         Rune r;
@@ -2006,7 +2251,7 @@
           if (!ps.DoLeftParenNoCapture())
             return NULL;
         } else {
-          if (!ps.DoLeftParen(NULL))
+          if (!ps.DoLeftParen(StringPiece()))
             return NULL;
         }
         t.remove_prefix(1);  // '('
@@ -2075,12 +2320,14 @@
             //   a** is a syntax error, not a double-star.
             // (and a++ means something else entirely, which we don't support!)
             status->set_code(kRegexpRepeatOp);
-            status->set_error_arg(StringPiece(lastunary.begin(),
-                                              t.begin() - lastunary.begin()));
+            status->set_error_arg(StringPiece(
+                lastunary.begin(),
+                static_cast<size_t>(t.begin() - lastunary.begin())));
             return NULL;
           }
         }
-        opstr.set(opstr.data(), t.data() - opstr.data());
+        opstr = StringPiece(opstr.data(),
+                            static_cast<size_t>(t.data() - opstr.data()));
         if (!ps.PushRepeatOp(op, opstr, nongreedy))
           return NULL;
         isunary = opstr;
@@ -2106,12 +2353,14 @@
           if (lastunary.size() > 0) {
             // Not allowed to stack repetition operators.
             status->set_code(kRegexpRepeatOp);
-            status->set_error_arg(StringPiece(lastunary.begin(),
-                                              t.begin() - lastunary.begin()));
+            status->set_error_arg(StringPiece(
+                lastunary.begin(),
+                static_cast<size_t>(t.begin() - lastunary.begin())));
             return NULL;
           }
         }
-        opstr.set(opstr.data(), t.data() - opstr.data());
+        opstr = StringPiece(opstr.data(),
+                            static_cast<size_t>(t.data() - opstr.data()));
         if (!ps.PushRepetition(lo, hi, opstr, nongreedy))
           return NULL;
         isunary = opstr;
@@ -2187,7 +2436,7 @@
           }
         }
 
-        UGroup *g = MaybeParsePerlCCEscape(&t, ps.flags());
+        const UGroup *g = MaybeParsePerlCCEscape(&t, ps.flags());
         if (g != NULL) {
           Regexp* re = new Regexp(kRegexpCharClass, ps.flags() & ~FoldCase);
           re->ccb_ = new CharClassBuilder;
diff --git a/re2/perl_groups.cc b/re2/perl_groups.cc
index 1af5b43..422b388 100644
--- a/re2/perl_groups.cc
+++ b/re2/perl_groups.cc
@@ -5,21 +5,21 @@
 
 namespace re2 {
 
-static URange16 code1[] = {  /* \d */
+static const URange16 code1[] = {  /* \d */
 	{ 0x30, 0x39 },
 };
-static URange16 code2[] = {  /* \s */
+static const URange16 code2[] = {  /* \s */
 	{ 0x9, 0xa },
 	{ 0xc, 0xd },
 	{ 0x20, 0x20 },
 };
-static URange16 code3[] = {  /* \w */
+static const URange16 code3[] = {  /* \w */
 	{ 0x30, 0x39 },
 	{ 0x41, 0x5a },
 	{ 0x5f, 0x5f },
 	{ 0x61, 0x7a },
 };
-UGroup perl_groups[] = {
+const UGroup perl_groups[] = {
 	{ "\\d", +1, code1, 1 },
 	{ "\\D", -1, code1, 1 },
 	{ "\\s", +1, code2, 3 },
@@ -27,64 +27,64 @@
 	{ "\\w", +1, code3, 4 },
 	{ "\\W", -1, code3, 4 },
 };
-int num_perl_groups = 6;
-static URange16 code4[] = {  /* [:alnum:] */
+const int num_perl_groups = 6;
+static const URange16 code4[] = {  /* [:alnum:] */
 	{ 0x30, 0x39 },
 	{ 0x41, 0x5a },
 	{ 0x61, 0x7a },
 };
-static URange16 code5[] = {  /* [:alpha:] */
+static const URange16 code5[] = {  /* [:alpha:] */
 	{ 0x41, 0x5a },
 	{ 0x61, 0x7a },
 };
-static URange16 code6[] = {  /* [:ascii:] */
+static const URange16 code6[] = {  /* [:ascii:] */
 	{ 0x0, 0x7f },
 };
-static URange16 code7[] = {  /* [:blank:] */
+static const URange16 code7[] = {  /* [:blank:] */
 	{ 0x9, 0x9 },
 	{ 0x20, 0x20 },
 };
-static URange16 code8[] = {  /* [:cntrl:] */
+static const URange16 code8[] = {  /* [:cntrl:] */
 	{ 0x0, 0x1f },
 	{ 0x7f, 0x7f },
 };
-static URange16 code9[] = {  /* [:digit:] */
+static const URange16 code9[] = {  /* [:digit:] */
 	{ 0x30, 0x39 },
 };
-static URange16 code10[] = {  /* [:graph:] */
+static const URange16 code10[] = {  /* [:graph:] */
 	{ 0x21, 0x7e },
 };
-static URange16 code11[] = {  /* [:lower:] */
+static const URange16 code11[] = {  /* [:lower:] */
 	{ 0x61, 0x7a },
 };
-static URange16 code12[] = {  /* [:print:] */
+static const URange16 code12[] = {  /* [:print:] */
 	{ 0x20, 0x7e },
 };
-static URange16 code13[] = {  /* [:punct:] */
+static const URange16 code13[] = {  /* [:punct:] */
 	{ 0x21, 0x2f },
 	{ 0x3a, 0x40 },
 	{ 0x5b, 0x60 },
 	{ 0x7b, 0x7e },
 };
-static URange16 code14[] = {  /* [:space:] */
+static const URange16 code14[] = {  /* [:space:] */
 	{ 0x9, 0xd },
 	{ 0x20, 0x20 },
 };
-static URange16 code15[] = {  /* [:upper:] */
+static const URange16 code15[] = {  /* [:upper:] */
 	{ 0x41, 0x5a },
 };
-static URange16 code16[] = {  /* [:word:] */
+static const URange16 code16[] = {  /* [:word:] */
 	{ 0x30, 0x39 },
 	{ 0x41, 0x5a },
 	{ 0x5f, 0x5f },
 	{ 0x61, 0x7a },
 };
-static URange16 code17[] = {  /* [:xdigit:] */
+static const URange16 code17[] = {  /* [:xdigit:] */
 	{ 0x30, 0x39 },
 	{ 0x41, 0x46 },
 	{ 0x61, 0x66 },
 };
-UGroup posix_groups[] = {
+const UGroup posix_groups[] = {
 	{ "[:alnum:]", +1, code4, 3 },
 	{ "[:^alnum:]", -1, code4, 3 },
 	{ "[:alpha:]", +1, code5, 2 },
@@ -114,6 +114,6 @@
 	{ "[:xdigit:]", +1, code17, 3 },
 	{ "[:^xdigit:]", -1, code17, 3 },
 };
-int num_posix_groups = 28;
+const int num_posix_groups = 28;
 
 }  // namespace re2
diff --git a/re2/prefilter.cc b/re2/prefilter.cc
index 4b9c35d..b657357 100644
--- a/re2/prefilter.cc
+++ b/re2/prefilter.cc
@@ -2,36 +2,40 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-#include "util/util.h"
 #include "re2/prefilter.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "util/util.h"
+#include "util/logging.h"
+#include "util/strutil.h"
+#include "util/utf.h"
 #include "re2/re2.h"
 #include "re2/unicode_casefold.h"
 #include "re2/walker-inl.h"
 
 namespace re2 {
 
-static const int Trace = false;
+static const bool ExtraDebug = false;
 
-typedef set<string>::iterator SSIter;
-typedef set<string>::const_iterator ConstSSIter;
+typedef std::set<string>::iterator SSIter;
+typedef std::set<string>::const_iterator ConstSSIter;
 
-static int alloc_id = 100000;  // Used for debugging.
 // Initializes a Prefilter, allocating subs_ as necessary.
 Prefilter::Prefilter(Op op) {
   op_ = op;
   subs_ = NULL;
   if (op_ == AND || op_ == OR)
-    subs_ = new vector<Prefilter*>;
-
-  alloc_id_ = alloc_id++;
-  VLOG(10) << "alloc_id: " << alloc_id_;
+    subs_ = new std::vector<Prefilter*>;
 }
 
 // Destroys a Prefilter.
 Prefilter::~Prefilter() {
-  VLOG(10) << "Deleted: " << alloc_id_;
   if (subs_) {
-    for (int i = 0; i < subs_->size(); i++)
+    for (size_t i = 0; i < subs_->size(); i++)
       delete (*subs_)[i];
     delete subs_;
     subs_ = NULL;
@@ -45,7 +49,7 @@
   }
 
   // Nothing left in the AND/OR.
-  if (subs_->size() == 0) {
+  if (subs_->empty()) {
     if (op_ == AND)
       op_ = ALL;  // AND of nothing is true
     else
@@ -100,7 +104,7 @@
 
   // If a and b match op, merge their contents.
   if (a->op() == op && b->op() == op) {
-    for (int i = 0; i < b->subs()->size(); i++) {
+    for (size_t i = 0; i < b->subs()->size(); i++) {
       Prefilter* bb = (*b->subs())[i];
       a->subs()->push_back(bb);
     }
@@ -136,7 +140,7 @@
   return AndOr(OR, a, b);
 }
 
-static void SimplifyStringSet(set<string> *ss) {
+static void SimplifyStringSet(std::set<string> *ss) {
   // Now make sure that the strings aren't redundant.  For example, if
   // we know "ab" is a required string, then it doesn't help at all to
   // know that "abc" is also a required string, so delete "abc". This
@@ -157,7 +161,7 @@
   }
 }
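
SimplifyStringSet's comment above says that once "ab" is a required string, "abc" adds no filtering power. A small sketch of that pruning, assuming containment is checked by substring search (the real function's walk over the set differs in detail):

#include <cstdio>
#include <iterator>
#include <set>
#include <string>

int main() {
  std::set<std::string> ss = {"ab", "abc", "xyz"};
  for (auto i = ss.begin(); i != ss.end(); ) {
    bool redundant = false;
    for (const auto& other : ss) {
      if (&other != &*i && i->find(other) != std::string::npos)
        redundant = true;  // *i contains a shorter required string
    }
    i = redundant ? ss.erase(i) : std::next(i);
  }
  for (const auto& s : ss)
    std::printf("%s\n", s.c_str());  // ab, xyz
  return 0;
}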
 
-Prefilter* Prefilter::OrStrings(set<string>* ss) {
+Prefilter* Prefilter::OrStrings(std::set<string>* ss) {
   SimplifyStringSet(ss);
   Prefilter* or_prefilter = NULL;
   if (!ss->empty()) {
@@ -175,7 +179,7 @@
     return r;
   }
 
-  CaseFold *f = LookupCaseFold(unicode_tolower, num_unicode_tolower, r);
+  const CaseFold *f = LookupCaseFold(unicode_tolower, num_unicode_tolower, r);
   if (f == NULL || r < f->lo)
     return r;
   return ApplyFold(f, r);
@@ -210,7 +214,7 @@
   static Info* Quest(Info* a);
   static Info* EmptyString();
   static Info* NoMatch();
-  static Info* AnyChar();
+  static Info* AnyCharOrAnyByte();
   static Info* CClass(CharClass* cc, bool latin1);
   static Info* Literal(Rune r);
   static Info* LiteralLatin1(Rune r);
@@ -222,14 +226,14 @@
   // Caller takes ownership of the Prefilter.
   Prefilter* TakeMatch();
 
-  set<string>& exact() { return exact_; }
+  std::set<string>& exact() { return exact_; }
 
   bool is_exact() const { return is_exact_; }
 
   class Walker;
 
  private:
-  set<string> exact_;
+  std::set<string> exact_;
 
   // When is_exact_ is true, the strings that match
   // are placed in exact_. When it is no longer an exact
@@ -265,18 +269,12 @@
 
 // Format a Info in string form.
 string Prefilter::Info::ToString() {
-  if (this == NULL) {
-    // Sometimes when iterating on children of a node,
-    // some children might have NULL Info. Adding
-    // the check here for NULL to take care of cases where
-    // the caller is not checking.
-    return "";
-  }
-
   if (is_exact_) {
     int n = 0;
     string s;
-    for (set<string>::iterator i = exact_.begin(); i != exact_.end(); ++i) {
+    for (std::set<string>::iterator i = exact_.begin();
+         i != exact_.end();
+         ++i) {
       if (n++ > 0)
         s += ",";
       s += *i;
@@ -291,16 +289,17 @@
 }
 
 // Add the strings from src to dst.
-static void CopyIn(const set<string>& src, set<string>* dst) {
+static void CopyIn(const std::set<string>& src,
+                   std::set<string>* dst) {
   for (ConstSSIter i = src.begin(); i != src.end(); ++i)
     dst->insert(*i);
 }
 
 // Add the cross-product of a and b to dst.
 // (For each string i in a and j in b, add i+j.)
-static void CrossProduct(const set<string>& a,
-                         const set<string>& b,
-                         set<string>* dst) {
+static void CrossProduct(const std::set<string>& a,
+                         const std::set<string>& b,
+                         std::set<string>* dst) {
   for (ConstSSIter i = a.begin(); i != a.end(); ++i)
     for (ConstSSIter j = b.begin(); j != b.end(); ++j)
       dst->insert(*i + *j);
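
CrossProduct concatenates every string in a with every string in b, as the comment says. A quick usage sketch of the same operation on std::set:

#include <cstdio>
#include <set>
#include <string>

int main() {
  std::set<std::string> a = {"a", "b"};
  std::set<std::string> b = {"x", "y"};
  std::set<std::string> dst;
  for (const auto& i : a)
    for (const auto& j : b)
      dst.insert(i + j);
  for (const auto& s : dst)
    std::printf("%s\n", s.c_str());  // ax ay bx by
  return 0;
}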
@@ -418,8 +417,8 @@
   return info;
 }
 
-// Constructs Info for dot (any character).
-Prefilter::Info* Prefilter::Info::AnyChar() {
+// Constructs Info for dot (any character) or \C (any byte).
+Prefilter::Info* Prefilter::Info::AnyCharOrAnyByte() {
   Prefilter::Info* info = new Prefilter::Info();
   info->match_ = new Prefilter(ALL);
   return info;
@@ -454,15 +453,15 @@
 typedef CharClass::iterator CCIter;
 Prefilter::Info* Prefilter::Info::CClass(CharClass *cc,
                                          bool latin1) {
-  if (Trace) {
-    VLOG(0) << "CharClassInfo:";
+  if (ExtraDebug) {
+    LOG(ERROR) << "CharClassInfo:";
     for (CCIter i = cc->begin(); i != cc->end(); ++i)
-      VLOG(0) << "  " << i->lo << "-" << i->hi;
+      LOG(ERROR) << "  " << i->lo << "-" << i->hi;
   }
 
   // If the class is too large, it's okay to overestimate.
   if (cc->size() > 10)
-    return AnyChar();
+    return AnyCharOrAnyByte();
 
   Prefilter::Info *a = new Prefilter::Info();
   for (CCIter i = cc->begin(); i != cc->end(); ++i)
@@ -477,9 +476,8 @@
 
   a->is_exact_ = true;
 
-  if (Trace) {
-    VLOG(0) << " = " << a->ToString();
-  }
+  if (ExtraDebug)
+    LOG(ERROR) << " = " << a->ToString();
 
   return a;
 }
@@ -500,15 +498,16 @@
   bool latin1() { return latin1_; }
  private:
   bool latin1_;
-  DISALLOW_EVIL_CONSTRUCTORS(Walker);
+
+  Walker(const Walker&) = delete;
+  Walker& operator=(const Walker&) = delete;
 };
 
 Prefilter::Info* Prefilter::BuildInfo(Regexp* re) {
-  if (Trace) {
-    LOG(INFO) << "BuildPrefilter::Info: " << re->ToString();
-  }
+  if (ExtraDebug)
+    LOG(ERROR) << "BuildPrefilter::Info: " << re->ToString();
 
-  bool latin1 = re->parse_flags() & Regexp::Latin1;
+  bool latin1 = (re->parse_flags() & Regexp::Latin1) != 0;
   Prefilter::Info::Walker w(latin1);
   Prefilter::Info* info = w.WalkExponential(re, NULL, 100000);
 
@@ -608,7 +607,6 @@
       info = child_args[0];
       for (int i = 1; i < nchild_args; i++)
         info = Alt(info, child_args[i]);
-      VLOG(10) << "Alt: " << info->ToString();
       break;
 
     case kRegexpStar:
@@ -624,8 +622,9 @@
       break;
 
     case kRegexpAnyChar:
+    case kRegexpAnyByte:
       // Claim nothing, except that it's not empty.
-      info = AnyChar();
+      info = AnyCharOrAnyByte();
       break;
 
     case kRegexpCharClass:
@@ -638,10 +637,9 @@
       break;
   }
 
-  if (Trace) {
-    VLOG(0) << "BuildInfo " << re->ToString()
-            << ": " << info->ToString();
-  }
+  if (ExtraDebug)
+    LOG(ERROR) << "BuildInfo " << re->ToString()
+               << ": " << (info ? info->ToString() : "");
 
   return info;
 }
@@ -665,9 +663,6 @@
 }
 
 string Prefilter::DebugString() const {
-  if (this == NULL)
-    return "<nil>";
-
   switch (op_) {
     default:
       LOG(DFATAL) << "Bad op in Prefilter::DebugString: " << op_;
@@ -680,19 +675,21 @@
       return "";
     case AND: {
       string s = "";
-      for (int i = 0; i < subs_->size(); i++) {
+      for (size_t i = 0; i < subs_->size(); i++) {
         if (i > 0)
           s += " ";
-        s += (*subs_)[i]->DebugString();
+        Prefilter* sub = (*subs_)[i];
+        s += sub ? sub->DebugString() : "<nil>";
       }
       return s;
     }
     case OR: {
       string s = "(";
-      for (int i = 0; i < subs_->size(); i++) {
+      for (size_t i = 0; i < subs_->size(); i++) {
         if (i > 0)
           s += "|";
-        s += (*subs_)[i]->DebugString();
+        Prefilter* sub = (*subs_)[i];
+        s += sub ? sub->DebugString() : "<nil>";
       }
       s += ")";
       return s;
diff --git a/re2/prefilter.h b/re2/prefilter.h
index c2f9ddd..ead09e1 100644
--- a/re2/prefilter.h
+++ b/re2/prefilter.h
@@ -2,14 +2,19 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#ifndef RE2_PREFILTER_H_
+#define RE2_PREFILTER_H_
+
 // Prefilter is the class used to extract string guards from regexps.
 // Rather than using Prefilter class directly, use FilteredRE2.
 // See filtered_re2.h
 
-#ifndef RE2_PREFILTER_H_
-#define RE2_PREFILTER_H_
+#include <set>
+#include <string>
+#include <vector>
 
 #include "util/util.h"
+#include "util/logging.h"
 
 namespace re2 {
 
@@ -37,14 +42,14 @@
   int unique_id() const { return unique_id_; }
 
   // The children of the Prefilter node.
-  vector<Prefilter*>* subs() {
-    CHECK(op_ == AND || op_ == OR);
+  std::vector<Prefilter*>* subs() {
+    DCHECK(op_ == AND || op_ == OR);
     return subs_;
   }
 
   // Set the children vector. Prefilter takes ownership of subs and
   // subs_ will be deleted when Prefilter is deleted.
-  void set_subs(vector<Prefilter*>* subs) { subs_ = subs; }
+  void set_subs(std::vector<Prefilter*>* subs) { subs_ = subs; }
 
   // Given a RE2, return a Prefilter. The caller takes ownership of
   // the Prefilter and should deallocate it. Returns NULL if Prefilter
@@ -72,7 +77,7 @@
 
   static Prefilter* FromString(const string& str);
 
-  static Prefilter* OrStrings(set<string>* ss);
+  static Prefilter* OrStrings(std::set<string>* ss);
 
   static Info* BuildInfo(Regexp* re);
 
@@ -82,7 +87,7 @@
   Op op_;
 
   // Sub-matches for AND or OR Prefilter.
-  vector<Prefilter*>* subs_;
+  std::vector<Prefilter*>* subs_;
 
   // Actual string to match in leaf node.
   string atom_;
@@ -94,10 +99,8 @@
   // and -1 for duplicate nodes.
   int unique_id_;
 
-  // Used for debugging, helps in tracking memory leaks.
-  int alloc_id_;
-
-  DISALLOW_EVIL_CONSTRUCTORS(Prefilter);
+  Prefilter(const Prefilter&) = delete;
+  Prefilter& operator=(const Prefilter&) = delete;
 };
 
 }  // namespace re2
diff --git a/re2/prefilter_tree.cc b/re2/prefilter_tree.cc
index d8bc37a..a07de40 100644
--- a/re2/prefilter_tree.cc
+++ b/re2/prefilter_tree.cc
@@ -2,54 +2,153 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-#include "util/util.h"
-#include "util/flags.h"
-#include "re2/prefilter.h"
 #include "re2/prefilter_tree.h"
-#include "re2/re2.h"
 
-DEFINE_int32(filtered_re2_min_atom_len,
-             3,
-             "Strings less than this length are not stored as atoms");
+#include <stddef.h>
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "util/util.h"
+#include "util/logging.h"
+#include "util/strutil.h"
+#include "re2/prefilter.h"
+#include "re2/re2.h"
 
 namespace re2 {
 
+static const bool ExtraDebug = false;
+
 PrefilterTree::PrefilterTree()
-    : compiled_(false) {
+    : compiled_(false),
+      min_atom_len_(3) {
+}
+
+PrefilterTree::PrefilterTree(int min_atom_len)
+    : compiled_(false),
+      min_atom_len_(min_atom_len) {
 }
 
 PrefilterTree::~PrefilterTree() {
-  for (int i = 0; i < prefilter_vec_.size(); i++)
+  for (size_t i = 0; i < prefilter_vec_.size(); i++)
     delete prefilter_vec_[i];
 
-  for (int i = 0; i < entries_.size(); i++)
+  for (size_t i = 0; i < entries_.size(); i++)
     delete entries_[i].parents;
 }
 
-// Functions used for adding and Compiling prefilters to the
-// PrefilterTree.
-static bool KeepPart(Prefilter* prefilter, int level) {
-  if (prefilter == NULL)
+void PrefilterTree::Add(Prefilter* prefilter) {
+  if (compiled_) {
+    LOG(DFATAL) << "Add called after Compile.";
+    return;
+  }
+  if (prefilter != NULL && !KeepNode(prefilter)) {
+    delete prefilter;
+    prefilter = NULL;
+  }
+
+  prefilter_vec_.push_back(prefilter);
+}
+
+void PrefilterTree::Compile(std::vector<string>* atom_vec) {
+  if (compiled_) {
+    LOG(DFATAL) << "Compile called already.";
+    return;
+  }
+
+  // Some legacy users of PrefilterTree call Compile() before
+  // adding any regexps and expect Compile() to have no effect.
+  if (prefilter_vec_.empty())
+    return;
+
+  compiled_ = true;
+
+  // TODO(junyer): Use std::unordered_set<Prefilter*> instead?
+  NodeMap nodes;
+  AssignUniqueIds(&nodes, atom_vec);
+
+  // Identify nodes that are too common among prefilters and are
+  // triggering too many parents. Then get rid of them if possible.
+  // Note that getting rid of a prefilter node simply means it is
+  // no longer necessary for its parents to trigger; that is, we do
+  // not miss out on any regexps triggering by getting rid of a
+  // prefilter node.
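+  // For example (illustrative): if a short atom such as "http" guards many
+  // AND prefilters that each also contain longer atoms, forgetting "http"
+  // as a trigger avoids the fan-out while the remaining atoms still guard
+  // those prefilters.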
+  for (size_t i = 0; i < entries_.size(); i++) {
+    StdIntMap* parents = entries_[i].parents;
+    if (parents->size() > 8) {
+      // This one triggers too many things. If all the parents are AND
+      // nodes and have other things guarding them, then get rid of
+      // this trigger. TODO(vsri): Adjust the threshold appropriately,
+      // make it a function of total number of nodes?
+      bool have_other_guard = true;
+      for (StdIntMap::iterator it = parents->begin();
+           it != parents->end(); ++it) {
+        have_other_guard = have_other_guard &&
+            (entries_[it->first].propagate_up_at_count > 1);
+      }
+
+      if (have_other_guard) {
+        for (StdIntMap::iterator it = parents->begin();
+             it != parents->end(); ++it)
+          entries_[it->first].propagate_up_at_count -= 1;
+
+        parents->clear();  // Forget the parents
+      }
+    }
+  }
+
+  if (ExtraDebug)
+    PrintDebugInfo(&nodes);
+}
+
+Prefilter* PrefilterTree::CanonicalNode(NodeMap* nodes, Prefilter* node) {
+  string node_string = NodeString(node);
+  std::map<string, Prefilter*>::iterator iter = nodes->find(node_string);
+  if (iter == nodes->end())
+    return NULL;
+  return (*iter).second;
+}
+
+string PrefilterTree::NodeString(Prefilter* node) const {
+  // Adding the operation disambiguates AND/OR/atom nodes.
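+  // Illustrative example (the numeric op values are an assumption): an ATOM
+  // node for "abc" serializes as "<op>:abc", while an AND node over children
+  // with unique ids 3 and 7 serializes as "<op>:3,7".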
+  string s = StringPrintf("%d", node->op()) + ":";
+  if (node->op() == Prefilter::ATOM) {
+    s += node->atom();
+  } else {
+    for (size_t i = 0; i < node->subs()->size(); i++) {
+      if (i > 0)
+        s += ',';
+      s += StringPrintf("%d", (*node->subs())[i]->unique_id());
+    }
+  }
+  return s;
+}
+
+bool PrefilterTree::KeepNode(Prefilter* node) const {
+  if (node == NULL)
     return false;
 
-  switch (prefilter->op()) {
+  switch (node->op()) {
     default:
-      LOG(DFATAL) << "Unexpected op in KeepPart: "
-                  << prefilter->op();
+      LOG(DFATAL) << "Unexpected op in KeepNode: " << node->op();
       return false;
 
     case Prefilter::ALL:
+    case Prefilter::NONE:
       return false;
 
     case Prefilter::ATOM:
-      return prefilter->atom().size() >=
-          FLAGS_filtered_re2_min_atom_len;
+      return node->atom().size() >= static_cast<size_t>(min_atom_len_);
 
     case Prefilter::AND: {
       int j = 0;
-      vector<Prefilter*>* subs = prefilter->subs();
-      for (int i = 0; i < subs->size(); i++)
-        if (KeepPart((*subs)[i], level + 1))
+      std::vector<Prefilter*>* subs = node->subs();
+      for (size_t i = 0; i < subs->size(); i++)
+        if (KeepNode((*subs)[i]))
           (*subs)[j++] = (*subs)[i];
         else
           delete (*subs)[i];
@@ -59,114 +158,26 @@
     }
 
     case Prefilter::OR:
-      for (int i = 0; i < prefilter->subs()->size(); i++)
-        if (!KeepPart((*prefilter->subs())[i], level + 1))
+      for (size_t i = 0; i < node->subs()->size(); i++)
+        if (!KeepNode((*node->subs())[i]))
           return false;
       return true;
   }
 }
 
-void PrefilterTree::Add(Prefilter *f) {
-  if (compiled_) {
-    LOG(DFATAL) << "Add after Compile.";
-    return;
-  }
-  if (f != NULL && !KeepPart(f, 0)) {
-    delete f;
-    f = NULL;
-  }
-
-  prefilter_vec_.push_back(f);
-}
-
-void PrefilterTree::Compile(vector<string>* atom_vec) {
-  if (compiled_) {
-    LOG(DFATAL) << "Compile after Compile.";
-    return;
-  }
-
-  // We do this check to support some legacy uses of
-  // PrefilterTree that call Compile before adding any regexps,
-  // and expect Compile not to have effect.
-  if (prefilter_vec_.empty())
-    return;
-
-  compiled_ = true;
-
-  AssignUniqueIds(atom_vec);
-
-  // Identify nodes that are too common among prefilters and are
-  // triggering too many parents. Then get rid of them if possible.
-  // Note that getting rid of a prefilter node simply means they are
-  // no longer necessary for their parent to trigger; that is, we do
-  // not miss out on any regexps triggering by getting rid of a
-  // prefilter node.
-  for (int i = 0; i < entries_.size(); i++) {
-    IntMap* parents = entries_[i].parents;
-    if (parents->size() > 8) {
-      // This one triggers too many things. If all the parents are AND
-      // nodes and have other things guarding them, then get rid of
-      // this trigger. TODO(vsri): Adjust the threshold appropriately,
-      // make it a function of total number of nodes?
-      bool have_other_guard = true;
-      for (IntMap::iterator it = parents->begin(); it != parents->end(); ++it)
-        have_other_guard = have_other_guard &&
-            (entries_[it->index()].propagate_up_at_count > 1);
-
-      if (have_other_guard) {
-        for (IntMap::iterator it = parents->begin();
-             it != parents->end(); ++it)
-          entries_[it->index()].propagate_up_at_count -= 1;
-
-        parents->clear();  // Forget the parents
-      }
-    }
-  }
-
-  PrintDebugInfo();
-}
-
-Prefilter* PrefilterTree::CanonicalNode(Prefilter* node) {
-  string node_string = NodeString(node);
-  map<string, Prefilter*>::iterator iter = node_map_.find(node_string);
-  if (iter == node_map_.end())
-    return NULL;
-  return (*iter).second;
-}
-
-static string Itoa(int n) {
-  char buf[100];
-  snprintf(buf, sizeof buf, "%d", n);
-  return string(buf);
-}
-
-string PrefilterTree::NodeString(Prefilter* node) const {
-  // Adding the operation disambiguates AND/OR/atom nodes.
-  string s = Itoa(node->op()) + ":";
-  if (node->op() == Prefilter::ATOM) {
-    s += node->atom();
-  } else {
-    for (int i = 0; i < node->subs()->size() ; i++) {
-      if (i > 0)
-        s += ',';
-      s += Itoa((*node->subs())[i]->unique_id());
-    }
-  }
-  return s;
-}
-
-void PrefilterTree::AssignUniqueIds(vector<string>* atom_vec) {
+void PrefilterTree::AssignUniqueIds(NodeMap* nodes,
+                                    std::vector<string>* atom_vec) {
   atom_vec->clear();
 
   // Build vector of all filter nodes, sorted topologically
   // from top to bottom in v.
-  vector<Prefilter*> v;
+  std::vector<Prefilter*> v;
 
   // Add the top level nodes of each regexp prefilter.
-  for (int i = 0; i < prefilter_vec_.size(); i++) {
+  for (size_t i = 0; i < prefilter_vec_.size(); i++) {
     Prefilter* f = prefilter_vec_[i];
     if (f == NULL)
-      unfiltered_.push_back(i);
+      unfiltered_.push_back(static_cast<int>(i));
 
     // We push NULL also on to v, so that we maintain the
     // mapping of index==regexpid for level=0 prefilter nodes.
@@ -174,29 +185,29 @@
   }
 
   // Now add all the descendant nodes.
-  for (int i = 0; i < v.size(); i++) {
+  for (size_t i = 0; i < v.size(); i++) {
     Prefilter* f = v[i];
     if (f == NULL)
       continue;
     if (f->op() == Prefilter::AND || f->op() == Prefilter::OR) {
-      const vector<Prefilter*>& subs = *f->subs();
-      for (int j = 0; j < subs.size(); j++)
+      const std::vector<Prefilter*>& subs = *f->subs();
+      for (size_t j = 0; j < subs.size(); j++)
         v.push_back(subs[j]);
     }
   }
 
   // Identify unique nodes.
   int unique_id = 0;
-  for (int i = v.size() - 1; i >= 0; i--) {
+  for (int i = static_cast<int>(v.size()) - 1; i >= 0; i--) {
     Prefilter *node = v[i];
     if (node == NULL)
       continue;
     node->set_unique_id(-1);
-    Prefilter* canonical = CanonicalNode(node);
+    Prefilter* canonical = CanonicalNode(nodes, node);
     if (canonical == NULL) {
       // Any further nodes that have the same node string
       // will find this node as the canonical node.
-      node_map_[NodeString(node)] = node;
+      nodes->emplace(NodeString(node), node);
       if (node->op() == Prefilter::ATOM) {
         atom_vec->push_back(node->atom());
         atom_index_to_id_.push_back(unique_id);
@@ -206,28 +217,28 @@
       node->set_unique_id(canonical->unique_id());
     }
   }
-  entries_.resize(node_map_.size());
+  entries_.resize(nodes->size());
 
-  // Create parent IntMap for the entries.
-  for (int i = v.size()  - 1; i >= 0; i--) {
+  // Create parent StdIntMap for the entries.
+  for (int i = static_cast<int>(v.size()) - 1; i >= 0; i--) {
     Prefilter* prefilter = v[i];
     if (prefilter == NULL)
       continue;
 
-    if (CanonicalNode(prefilter) != prefilter)
+    if (CanonicalNode(nodes, prefilter) != prefilter)
       continue;
 
     Entry* entry = &entries_[prefilter->unique_id()];
-    entry->parents = new IntMap(node_map_.size());
+    entry->parents = new StdIntMap();
   }
 
   // Fill the entries.
-  for (int i = v.size()  - 1; i >= 0; i--) {
+  for (int i = static_cast<int>(v.size()) - 1; i >= 0; i--) {
     Prefilter* prefilter = v[i];
     if (prefilter == NULL)
       continue;
 
-    if (CanonicalNode(prefilter) != prefilter)
+    if (CanonicalNode(nodes, prefilter) != prefilter)
       continue;
 
     Entry* entry = &entries_[prefilter->unique_id()];
@@ -244,24 +255,26 @@
 
       case Prefilter::OR:
       case Prefilter::AND: {
-        IntMap uniq_child(node_map_.size());
-        for (int j = 0; j < prefilter->subs()->size() ; j++) {
+        std::set<int> uniq_child;
+        for (size_t j = 0; j < prefilter->subs()->size(); j++) {
           Prefilter* child = (*prefilter->subs())[j];
-          Prefilter* canonical = CanonicalNode(child);
+          Prefilter* canonical = CanonicalNode(nodes, child);
           if (canonical == NULL) {
             LOG(DFATAL) << "Null canonical node";
             return;
           }
           int child_id = canonical->unique_id();
-          if (!uniq_child.has_index(child_id))
-            uniq_child.set_new(child_id, 1);
+          uniq_child.insert(child_id);
           // To the child, we want to add to parent indices.
           Entry* child_entry = &entries_[child_id];
-          if (!child_entry->parents->has_index(prefilter->unique_id()))
-            child_entry->parents->set_new(prefilter->unique_id(), 1);
+          if (child_entry->parents->find(prefilter->unique_id()) ==
+              child_entry->parents->end()) {
+            (*child_entry->parents)[prefilter->unique_id()] = 1;
+          }
         }
-        entry->propagate_up_at_count =
-            prefilter->op() == Prefilter::AND ? uniq_child.size() : 1;
+        entry->propagate_up_at_count = prefilter->op() == Prefilter::AND
+                                           ? static_cast<int>(uniq_child.size())
+                                           : 1;
 
         break;
       }
@@ -269,67 +282,65 @@
   }
 
   // For top level nodes, populate regexp id.
-  for (int i = 0; i < prefilter_vec_.size(); i++) {
+  for (size_t i = 0; i < prefilter_vec_.size(); i++) {
     if (prefilter_vec_[i] == NULL)
       continue;
-    int id = CanonicalNode(prefilter_vec_[i])->unique_id();
+    int id = CanonicalNode(nodes, prefilter_vec_[i])->unique_id();
     DCHECK_LE(0, id);
     Entry* entry = &entries_[id];
-    entry->regexps.push_back(i);
+    entry->regexps.push_back(static_cast<int>(i));
   }
 }
 
 // Functions for triggering during search.
 void PrefilterTree::RegexpsGivenStrings(
-    const vector<int>& matched_atoms,
-    vector<int>* regexps) const {
+    const std::vector<int>& matched_atoms,
+    std::vector<int>* regexps) const {
   regexps->clear();
   if (!compiled_) {
-    LOG(WARNING) << "Compile() not called";
-    for (int i = 0; i < prefilter_vec_.size(); ++i)
-      regexps->push_back(i);
-  } else {
-    if (!prefilter_vec_.empty()) {
-      IntMap regexps_map(prefilter_vec_.size());
-      vector<int> matched_atom_ids;
-      for (int j = 0; j < matched_atoms.size(); j++) {
-        matched_atom_ids.push_back(atom_index_to_id_[matched_atoms[j]]);
-        VLOG(10) << "Atom id:" << atom_index_to_id_[matched_atoms[j]];
-      }
-      PropagateMatch(matched_atom_ids, &regexps_map);
-      for (IntMap::iterator it = regexps_map.begin();
-           it != regexps_map.end();
-           ++it)
-        regexps->push_back(it->index());
+    // Some legacy users of PrefilterTree call Compile() before
+    // adding any regexps and expect Compile() to have no effect.
+    // This kludge is a counterpart to that kludge.
+    if (prefilter_vec_.empty())
+      return;
 
-      regexps->insert(regexps->end(), unfiltered_.begin(), unfiltered_.end());
-    }
+    LOG(ERROR) << "RegexpsGivenStrings called before Compile.";
+    for (size_t i = 0; i < prefilter_vec_.size(); i++)
+      regexps->push_back(static_cast<int>(i));
+  } else {
+    IntMap regexps_map(static_cast<int>(prefilter_vec_.size()));
+    std::vector<int> matched_atom_ids;
+    for (size_t j = 0; j < matched_atoms.size(); j++)
+      matched_atom_ids.push_back(atom_index_to_id_[matched_atoms[j]]);
+    PropagateMatch(matched_atom_ids, &regexps_map);
+    for (IntMap::iterator it = regexps_map.begin();
+         it != regexps_map.end();
+         ++it)
+      regexps->push_back(it->index());
+
+    regexps->insert(regexps->end(), unfiltered_.begin(), unfiltered_.end());
   }
-  sort(regexps->begin(), regexps->end());
+  std::sort(regexps->begin(), regexps->end());
 }
 
-void PrefilterTree::PropagateMatch(const vector<int>& atom_ids,
+void PrefilterTree::PropagateMatch(const std::vector<int>& atom_ids,
                                    IntMap* regexps) const {
-  IntMap count(entries_.size());
-  IntMap work(entries_.size());
-  for (int i = 0; i < atom_ids.size(); i++)
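+  // Sketch of the propagation: the work set is seeded with the matched atom
+  // entries; an OR parent triggers as soon as one child fires, while an AND
+  // parent waits until propagate_up_at_count of its distinct children have
+  // fired.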
+  IntMap count(static_cast<int>(entries_.size()));
+  IntMap work(static_cast<int>(entries_.size()));
+  for (size_t i = 0; i < atom_ids.size(); i++)
     work.set(atom_ids[i], 1);
   for (IntMap::iterator it = work.begin(); it != work.end(); ++it) {
     const Entry& entry = entries_[it->index()];
-    VLOG(10) << "Processing: " << it->index();
     // Record regexps triggered.
-    for (int i = 0; i < entry.regexps.size(); i++) {
-      VLOG(10) << "Regexp triggered: " << entry.regexps[i];
+    for (size_t i = 0; i < entry.regexps.size(); i++)
       regexps->set(entry.regexps[i], 1);
-    }
     int c;
     // Pass trigger up to parents.
-    for (IntMap::iterator it = entry.parents->begin();
+    for (StdIntMap::iterator it = entry.parents->begin();
          it != entry.parents->end();
          ++it) {
-      int j = it->index();
+      int j = it->first;
       const Entry& parent = entries_[j];
-      VLOG(10) << " parent= " << j << " trig= " << parent.propagate_up_at_count;
       // Delay until all the children have succeeded.
       if (parent.propagate_up_at_count > 1) {
         if (count.has_index(j)) {
@@ -342,7 +353,6 @@
         if (c < parent.propagate_up_at_count)
           continue;
       }
-      VLOG(10) << "Triggering: " << j;
       // Trigger the parent.
       work.set(j, 1);
     }
@@ -351,26 +361,26 @@
 
 // Debugging help.
 void PrefilterTree::PrintPrefilter(int regexpid) {
-  LOG(INFO) << DebugNodeString(prefilter_vec_[regexpid]);
+  LOG(ERROR) << DebugNodeString(prefilter_vec_[regexpid]);
 }
 
-void PrefilterTree::PrintDebugInfo() {
-  VLOG(10) << "#Unique Atoms: " << atom_index_to_id_.size();
-  VLOG(10) << "#Unique Nodes: " << entries_.size();
+void PrefilterTree::PrintDebugInfo(NodeMap* nodes) {
+  LOG(ERROR) << "#Unique Atoms: " << atom_index_to_id_.size();
+  LOG(ERROR) << "#Unique Nodes: " << entries_.size();
 
-  for (int i = 0; i < entries_.size(); ++i) {
-    IntMap* parents = entries_[i].parents;
-    const vector<int>& regexps = entries_[i].regexps;
-    VLOG(10) << "EntryId: " << i
-            << " N: " << parents->size() << " R: " << regexps.size();
-    for (IntMap::iterator it = parents->begin(); it != parents->end(); ++it)
-      VLOG(10) << it->index();
+  for (size_t i = 0; i < entries_.size(); i++) {
+    StdIntMap* parents = entries_[i].parents;
+    const std::vector<int>& regexps = entries_[i].regexps;
+    LOG(ERROR) << "EntryId: " << i
+               << " N: " << parents->size() << " R: " << regexps.size();
+    for (StdIntMap::iterator it = parents->begin(); it != parents->end(); ++it)
+      LOG(ERROR) << it->first;
   }
-  VLOG(10) << "Map:";
-  for (map<string, Prefilter*>::const_iterator iter = node_map_.begin();
-       iter != node_map_.end(); ++iter)
-    VLOG(10) << "NodeId: " << (*iter).second->unique_id()
-            << " Str: " << (*iter).first;
+  LOG(ERROR) << "Map:";
+  for (std::map<string, Prefilter*>::const_iterator iter = nodes->begin();
+       iter != nodes->end(); ++iter)
+    LOG(ERROR) << "NodeId: " << (*iter).second->unique_id()
+               << " Str: " << (*iter).first;
 }
 
 string PrefilterTree::DebugNodeString(Prefilter* node) const {
@@ -383,10 +393,10 @@
     // Adding the operation disambiguates AND and OR nodes.
     node_string +=  node->op() == Prefilter::AND ? "AND" : "OR";
     node_string += "(";
-    for (int i = 0; i < node->subs()->size() ; i++) {
+    for (size_t i = 0; i < node->subs()->size(); i++) {
       if (i > 0)
         node_string += ',';
-      node_string += Itoa((*node->subs())[i]->unique_id());
+      node_string += StringPrintf("%d", (*node->subs())[i]->unique_id());
       node_string += ":";
       node_string += DebugNodeString((*node->subs())[i]);
     }
diff --git a/re2/prefilter_tree.h b/re2/prefilter_tree.h
index 596b734..f81e134 100644
--- a/re2/prefilter_tree.h
+++ b/re2/prefilter_tree.h
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#ifndef RE2_PREFILTER_TREE_H_
+#define RE2_PREFILTER_TREE_H_
+
 // The PrefilterTree class is used to form an AND-OR tree of strings
 // that would trigger each regexp. The 'prefilter' of each regexp is
 // added to PrefilterTree, and then PrefilterTree is used to find all
@@ -12,22 +15,21 @@
 // favorite engine. PrefilterTree provides a set of strings (called
 // atoms) that the user of this class should use to do the string
 // matching.
-//
-#ifndef RE2_PREFILTER_TREE_H_
-#define RE2_PREFILTER_TREE_H_
+
+#include <map>
+#include <string>
+#include <vector>
 
 #include "util/util.h"
 #include "util/sparse_array.h"
+#include "re2/prefilter.h"
 
 namespace re2 {
 
-typedef SparseArray<int> IntMap;
-
-class Prefilter;
-
 class PrefilterTree {
  public:
   PrefilterTree();
+  explicit PrefilterTree(int min_atom_len);
   ~PrefilterTree();
 
   // Adds the prefilter for the next regexp. Note that we assume that
@@ -41,20 +43,24 @@
   // The caller should use the returned set of strings to do string matching.
   // Each time a string matches, the corresponding index then has to be
   // passed to RegexpsGivenStrings below.
-  void Compile(vector<string>* atom_vec);
+  void Compile(std::vector<string>* atom_vec);
 
   // Given the indices of the atoms that matched, returns the indices
   // of regexps that should be searched.  The matched_atoms should
   // contain all the ids of string atoms that were found to match the
   // content. The caller can use any string match engine to perform
   // this function. This function is thread safe.
-  void RegexpsGivenStrings(const vector<int>& matched_atoms,
-                           vector<int>* regexps) const;
+  void RegexpsGivenStrings(const std::vector<int>& matched_atoms,
+                           std::vector<int>* regexps) const;
 
   // Print debug prefilter. Also prints unique ids associated with
   // nodes of the prefilter of the regexp.
   void PrintPrefilter(int regexpid);
 
+ private:
+  typedef SparseArray<int> IntMap;
+  typedef std::map<int, int> StdIntMap;
+  typedef std::map<string, Prefilter*> NodeMap;
 
   // Each unique node has a corresponding Entry that helps in
   // passing the matching trigger information along the tree.
@@ -71,26 +77,28 @@
     // are two different nodes, but they share the atom 'def'. So when
     // 'def' matches, it triggers two parents, corresponding to the two
     // different OR nodes.
-    IntMap* parents;
+    StdIntMap* parents;
 
     // When this node is ready to trigger the parent, what are the
     // regexps that are triggered.
-    vector<int> regexps;
+    std::vector<int> regexps;
   };
 
- private:
+  // Returns true if the prefilter node should be kept.
+  bool KeepNode(Prefilter* node) const;
+
   // This function assigns unique ids to various parts of the
   // prefilter, by checking whether these nodes are already in the
   // PrefilterTree.
-  void AssignUniqueIds(vector<string>* atom_vec);
+  void AssignUniqueIds(NodeMap* nodes, std::vector<string>* atom_vec);
 
   // Given the matching atoms, find the regexps to be triggered.
-  void PropagateMatch(const vector<int>& atom_ids,
+  void PropagateMatch(const std::vector<int>& atom_ids,
                       IntMap* regexps) const;
 
   // Returns the prefilter node that has the same NodeString as this
   // node. For the canonical node, returns node.
-  Prefilter* CanonicalNode(Prefilter* node);
+  Prefilter* CanonicalNode(NodeMap* nodes, Prefilter* node);
 
   // A string that uniquely identifies the node. Assumes that the
   // children of node have already been assigned unique ids.
@@ -100,29 +108,30 @@
   string DebugNodeString(Prefilter* node) const;
 
   // Used for debugging.
-  void PrintDebugInfo();
+  void PrintDebugInfo(NodeMap* nodes);
 
   // These are all the nodes formed by Compile. Essentially, there is
   // one node for each unique atom and each unique AND/OR node.
-  vector<Entry> entries_;
-
-  // Map node string to canonical Prefilter node.
-  map<string, Prefilter*> node_map_;
+  std::vector<Entry> entries_;
 
   // indices of regexps that always pass through the filter (since we
   // found no required literals in these regexps).
-  vector<int> unfiltered_;
+  std::vector<int> unfiltered_;
 
   // vector of Prefilter for all regexps.
-  vector<Prefilter*> prefilter_vec_;
+  std::vector<Prefilter*> prefilter_vec_;
 
   // Atom index in returned strings to entry id mapping.
-  vector<int> atom_index_to_id_;
+  std::vector<int> atom_index_to_id_;
 
   // Has the prefilter tree been compiled.
   bool compiled_;
 
-  DISALLOW_EVIL_CONSTRUCTORS(PrefilterTree);
+  // Strings less than this length are not stored as atoms.
+  const int min_atom_len_;
+
+  PrefilterTree(const PrefilterTree&) = delete;
+  PrefilterTree& operator=(const PrefilterTree&) = delete;
 };
 
 }  // namespace
diff --git a/re2/prog.cc b/re2/prog.cc
index ef9ef23..fa03af9 100644
--- a/re2/prog.cc
+++ b/re2/prog.cc
@@ -5,48 +5,57 @@
 // Compiled regular expression representation.
 // Tested by compile_test.cc
 
-#include "util/util.h"
-#include "util/sparse_set.h"
 #include "re2/prog.h"
+
+#include <stdint.h>
+#include <string.h>
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "util/util.h"
+#include "util/logging.h"
+#include "util/strutil.h"
+#include "re2/bitmap256.h"
 #include "re2/stringpiece.h"
 
 namespace re2 {
 
 // Constructors per Inst opcode
 
-void Prog::Inst::InitAlt(uint32 out, uint32 out1) {
+void Prog::Inst::InitAlt(uint32_t out, uint32_t out1) {
   DCHECK_EQ(out_opcode_, 0);
   set_out_opcode(out, kInstAlt);
   out1_ = out1;
 }
 
-void Prog::Inst::InitByteRange(int lo, int hi, int foldcase, uint32 out) {
+void Prog::Inst::InitByteRange(int lo, int hi, int foldcase, uint32_t out) {
   DCHECK_EQ(out_opcode_, 0);
   set_out_opcode(out, kInstByteRange);
   lo_ = lo & 0xFF;
   hi_ = hi & 0xFF;
-  foldcase_ = foldcase;
+  foldcase_ = foldcase & 0xFF;
 }
 
-void Prog::Inst::InitCapture(int cap, uint32 out) {
+void Prog::Inst::InitCapture(int cap, uint32_t out) {
   DCHECK_EQ(out_opcode_, 0);
   set_out_opcode(out, kInstCapture);
   cap_ = cap;
 }
 
-void Prog::Inst::InitEmptyWidth(EmptyOp empty, uint32 out) {
+void Prog::Inst::InitEmptyWidth(EmptyOp empty, uint32_t out) {
   DCHECK_EQ(out_opcode_, 0);
   set_out_opcode(out, kInstEmptyWidth);
   empty_ = empty;
 }
 
-void Prog::Inst::InitMatch(int32 id) {
+void Prog::Inst::InitMatch(int32_t id) {
   DCHECK_EQ(out_opcode_, 0);
   set_opcode(kInstMatch);
   match_id_ = id;
 }
 
-void Prog::Inst::InitNop(uint32 out) {
+void Prog::Inst::InitNop(uint32_t out) {
   DCHECK_EQ(out_opcode_, 0);
   set_opcode(kInstNop);
 }
@@ -94,34 +103,25 @@
   : anchor_start_(false),
     anchor_end_(false),
     reversed_(false),
+    did_flatten_(false),
     did_onepass_(false),
     start_(0),
     start_unanchored_(0),
     size_(0),
-    byte_inst_count_(0),
     bytemap_range_(0),
+    first_byte_(-1),
     flags_(0),
-    onepass_statesize_(0),
-    inst_(NULL),
-    dfa_first_(NULL),
-    dfa_longest_(NULL),
-    dfa_mem_(0),
-    delete_dfa_(NULL),
-    unbytemap_(NULL),
+    list_count_(0),
     onepass_nodes_(NULL),
-    onepass_start_(NULL) {
+    dfa_mem_(0),
+    dfa_first_(NULL),
+    dfa_longest_(NULL) {
 }
 
 Prog::~Prog() {
-  if (delete_dfa_) {
-    if (dfa_first_)
-      delete_dfa_(dfa_first_);
-    if (dfa_longest_)
-      delete_dfa_(dfa_longest_);
-  }
+  DeleteDFA(dfa_longest_);
+  DeleteDFA(dfa_first_);
   delete[] onepass_nodes_;
-  delete[] inst_;
-  delete[] unbytemap_;
 }
 
 typedef SparseSet Workq;
@@ -133,7 +133,6 @@
 
 static string ProgToString(Prog* prog, Workq* q) {
   string s;
-
   for (Workq::iterator i = q->begin(); i != q->end(); ++i) {
     int id = *i;
     Prog::Inst* ip = prog->inst(id);
@@ -145,29 +144,56 @@
   return s;
 }
 
-string Prog::Dump() {
-  string map;
-  if (false) {  // Debugging
-    int lo = 0;
-    StringAppendF(&map, "byte map:\n");
-    for (int i = 0; i < bytemap_range_; i++) {
-      StringAppendF(&map, "\t%d. [%02x-%02x]\n", i, lo, unbytemap_[i]);
-      lo = unbytemap_[i] + 1;
-    }
-    StringAppendF(&map, "\n");
+static string FlattenedProgToString(Prog* prog, int start) {
+  string s;
+  for (int id = start; id < prog->size(); id++) {
+    Prog::Inst* ip = prog->inst(id);
+    if (ip->last())
+      StringAppendF(&s, "%d. %s\n", id, ip->Dump().c_str());
+    else
+      StringAppendF(&s, "%d+ %s\n", id, ip->Dump().c_str());
   }
+  return s;
+}
+
+string Prog::Dump() {
+  if (did_flatten_)
+    return FlattenedProgToString(this, start_);
 
   Workq q(size_);
   AddToQueue(&q, start_);
-  return map + ProgToString(this, &q);
+  return ProgToString(this, &q);
 }
 
 string Prog::DumpUnanchored() {
+  if (did_flatten_)
+    return FlattenedProgToString(this, start_unanchored_);
+
   Workq q(size_);
   AddToQueue(&q, start_unanchored_);
   return ProgToString(this, &q);
 }
 
+string Prog::DumpByteMap() {
+  string map;
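+  // Each output line covers a maximal run of bytes sharing a byte class,
+  // e.g. (illustrative) "[30-39] -> 2" if the ASCII digits form one class.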
+  for (int c = 0; c < 256; c++) {
+    int b = bytemap_[c];
+    int lo = c;
+    while (c < 256-1 && bytemap_[c+1] == b)
+      c++;
+    int hi = c;
+    StringAppendF(&map, "[%02x-%02x] -> %d\n", lo, hi, b);
+  }
+  return map;
+}
+
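+// first_byte_ is computed lazily and cached; std::call_once keeps the
+// initialization safe for concurrent callers. (Illustrative note: a negative
+// value is assumed to mean that no single first byte exists.)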
+int Prog::first_byte() {
+  std::call_once(first_byte_once_, [](Prog* prog) {
+    prog->first_byte_ = prog->ComputeFirstByte();
+  }, this);
+  return first_byte_;
+}
+
 static bool IsMatch(Prog*, Prog::Inst*);
 
 // Peep-hole optimizer.
@@ -260,7 +286,7 @@
   }
 }
 
-uint32 Prog::EmptyFlags(const StringPiece& text, const char* p) {
+uint32_t Prog::EmptyFlags(const StringPiece& text, const char* p) {
   int flags = 0;
 
   // ^ and \A
@@ -294,48 +320,504 @@
   return flags;
 }
 
-void Prog::MarkByteRange(int lo, int hi) {
-  CHECK_GE(lo, 0);
-  CHECK_GE(hi, 0);
-  CHECK_LE(lo, 255);
-  CHECK_LE(hi, 255);
-  if (lo > 0)
-    byterange_.Set(lo - 1);
-  byterange_.Set(hi);
+// ByteMapBuilder implements a coloring algorithm.
+//
+// The first phase is a series of "mark and merge" batches: we mark one or more
+// [lo-hi] ranges, then merge them into our internal state. Batching is not for
+// performance; rather, it means that the ranges are treated indistinguishably.
+//
+// Internally, the ranges are represented using a bitmap that stores the splits
+// and a vector that stores the colors; both of them are indexed by the ranges'
+// last bytes. Thus, in order to merge a [lo-hi] range, we split at lo-1 and at
+// hi (if not already split), then recolor each range in between. The color map
+// (i.e. from the old color to the new color) is maintained for the lifetime of
+// the batch and so underpins this somewhat obscure approach to set operations.
+//
+// The second phase builds the bytemap from our internal state: we recolor each
+// range, then store the new color (which is now the byte class) in each of the
+// corresponding array elements. Finally, we output the number of byte classes.
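+//
+// Illustrative walk-through: marking [a-z] and merging splits the initial
+// [00-ff] range into [00-60], [61-7a] and [7b-ff], recoloring [61-7a]; a
+// later batch marking [0-9] splits off [30-39] in the same way. Ranges
+// marked within a single batch share the recoloring, so they are never
+// distinguished from one another by that batch.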
+class ByteMapBuilder {
+ public:
+  ByteMapBuilder() {
+    // Initial state: the [0-255] range has color 256.
+    // This will avoid problems during the second phase,
+    // in which we assign byte classes numbered from 0.
+    splits_.Set(255);
+    colors_.resize(256);
+    colors_[255] = 256;
+    nextcolor_ = 257;
+  }
+
+  void Mark(int lo, int hi);
+  void Merge();
+  void Build(uint8_t* bytemap, int* bytemap_range);
+
+ private:
+  int Recolor(int oldcolor);
+
+  Bitmap256 splits_;
+  std::vector<int> colors_;
+  int nextcolor_;
+  std::vector<std::pair<int, int>> colormap_;
+  std::vector<std::pair<int, int>> ranges_;
+
+  ByteMapBuilder(const ByteMapBuilder&) = delete;
+  ByteMapBuilder& operator=(const ByteMapBuilder&) = delete;
+};
+
+void ByteMapBuilder::Mark(int lo, int hi) {
+  DCHECK_GE(lo, 0);
+  DCHECK_GE(hi, 0);
+  DCHECK_LE(lo, 255);
+  DCHECK_LE(hi, 255);
+  DCHECK_LE(lo, hi);
+
+  // Ignore any [0-255] ranges. They cause us to recolor every range, which
+  // has no effect on the eventual result and is therefore a waste of time.
+  if (lo == 0 && hi == 255)
+    return;
+
+  ranges_.emplace_back(lo, hi);
+}
+
+void ByteMapBuilder::Merge() {
+  for (std::vector<std::pair<int, int>>::const_iterator it = ranges_.begin();
+       it != ranges_.end();
+       ++it) {
+    int lo = it->first-1;
+    int hi = it->second;
+
+    if (0 <= lo && !splits_.Test(lo)) {
+      splits_.Set(lo);
+      int next = splits_.FindNextSetBit(lo+1);
+      colors_[lo] = colors_[next];
+    }
+    if (!splits_.Test(hi)) {
+      splits_.Set(hi);
+      int next = splits_.FindNextSetBit(hi+1);
+      colors_[hi] = colors_[next];
+    }
+
+    int c = lo+1;
+    while (c < 256) {
+      int next = splits_.FindNextSetBit(c);
+      colors_[next] = Recolor(colors_[next]);
+      if (next == hi)
+        break;
+      c = next+1;
+    }
+  }
+  colormap_.clear();
+  ranges_.clear();
+}
+
+void ByteMapBuilder::Build(uint8_t* bytemap, int* bytemap_range) {
+  // Assign byte classes numbered from 0.
+  nextcolor_ = 0;
+
+  int c = 0;
+  while (c < 256) {
+    int next = splits_.FindNextSetBit(c);
+    uint8_t b = static_cast<uint8_t>(Recolor(colors_[next]));
+    while (c <= next) {
+      bytemap[c] = b;
+      c++;
+    }
+  }
+
+  *bytemap_range = nextcolor_;
+}
+
+int ByteMapBuilder::Recolor(int oldcolor) {
+  // Yes, this is a linear search. There can be at most 256
+  // colors and there will typically be far fewer than that.
+  // Also, we need to consider keys *and* values in order to
+  // avoid recoloring a given range more than once per batch.
+  std::vector<std::pair<int, int>>::const_iterator it =
+      std::find_if(colormap_.begin(), colormap_.end(),
+                   [=](const std::pair<int, int>& kv) -> bool {
+                     return kv.first == oldcolor || kv.second == oldcolor;
+                   });
+  if (it != colormap_.end())
+    return it->second;
+  int newcolor = nextcolor_;
+  nextcolor_++;
+  colormap_.emplace_back(oldcolor, newcolor);
+  return newcolor;
 }
 
 void Prog::ComputeByteMap() {
-  // Fill in bytemap with byte classes for prog_.
-  // Ranges of bytes that are treated as indistinguishable
-  // by the regexp program are mapped to a single byte class.
-  // The vector prog_->byterange() marks the end of each
-  // such range.
-  const Bitmap<256>& v = byterange();
+  // Fill in bytemap with byte classes for the program.
+  // Ranges of bytes that are treated indistinguishably
+  // will be mapped to a single byte class.
+  ByteMapBuilder builder;
 
-  COMPILE_ASSERT(8*sizeof(v.Word(0)) == 32, wordsize);
-  uint8 n = 0;
-  uint32 bits = 0;
-  for (int i = 0; i < 256; i++) {
-    if ((i&31) == 0)
-      bits = v.Word(i >> 5);
-    bytemap_[i] = n;
-    n += bits & 1;
-    bits >>= 1;
-  }
-  bytemap_range_ = bytemap_[255] + 1;
-  unbytemap_ = new uint8[bytemap_range_];
-  for (int i = 0; i < 256; i++)
-    unbytemap_[bytemap_[i]] = i;
+  // Don't repeat the work for ^ and $.
+  bool marked_line_boundaries = false;
+  // Don't repeat the work for \b and \B.
+  bool marked_word_boundaries = false;
 
-  if (0) {  // For debugging: use trivial byte map.
-    for (int i = 0; i < 256; i++) {
-      bytemap_[i] = i;
-      unbytemap_[i] = i;
+  for (int id = 0; id < size(); id++) {
+    Inst* ip = inst(id);
+    if (ip->opcode() == kInstByteRange) {
+      int lo = ip->lo();
+      int hi = ip->hi();
+      builder.Mark(lo, hi);
+      if (ip->foldcase() && lo <= 'z' && hi >= 'a') {
+        int foldlo = lo;
+        int foldhi = hi;
+        if (foldlo < 'a')
+          foldlo = 'a';
+        if (foldhi > 'z')
+          foldhi = 'z';
+        if (foldlo <= foldhi)
+          builder.Mark(foldlo + 'A' - 'a', foldhi + 'A' - 'a');
+      }
+      // If this Inst is not the last Inst in its list AND the next Inst is
+      // also a ByteRange AND the Insts have the same out, defer the merge.
+      if (!ip->last() &&
+          inst(id+1)->opcode() == kInstByteRange &&
+          ip->out() == inst(id+1)->out())
+        continue;
+      builder.Merge();
+    } else if (ip->opcode() == kInstEmptyWidth) {
+      if (ip->empty() & (kEmptyBeginLine|kEmptyEndLine) &&
+          !marked_line_boundaries) {
+        builder.Mark('\n', '\n');
+        builder.Merge();
+        marked_line_boundaries = true;
+      }
+      if (ip->empty() & (kEmptyWordBoundary|kEmptyNonWordBoundary) &&
+          !marked_word_boundaries) {
+        // We require two batches here: the first for ranges that are word
+        // characters, the second for ranges that are not word characters.
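+        // (Illustrative) the first batch marks every maximal run of word
+        // characters ([0-9A-Za-z_]) and the second marks the complementary
+        // runs, so the word-boundary marking itself never distinguishes
+        // bytes within either set.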
+        for (bool isword : {true, false}) {
+          int j;
+          for (int i = 0; i < 256; i = j) {
+            for (j = i + 1; j < 256 &&
+                            Prog::IsWordChar(static_cast<uint8_t>(i)) ==
+                                Prog::IsWordChar(static_cast<uint8_t>(j));
+                 j++)
+              ;
+            if (Prog::IsWordChar(static_cast<uint8_t>(i)) == isword)
+              builder.Mark(i, j - 1);
+          }
+          builder.Merge();
+        }
+        marked_word_boundaries = true;
+      }
     }
+  }
+
+  builder.Build(bytemap_, &bytemap_range_);
+
+  if (0) {  // For debugging, use trivial bytemap.
+    LOG(ERROR) << "Using trivial bytemap.";
+    for (int i = 0; i < 256; i++)
+      bytemap_[i] = static_cast<uint8_t>(i);
     bytemap_range_ = 256;
-    LOG(INFO) << "Using trivial bytemap.";
+  }
+}
+
+// Prog::Flatten() implements a graph rewriting algorithm.
+//
+// The overall process is similar to epsilon removal, but retains some epsilon
+// transitions: those from Capture and EmptyWidth instructions; and those from
+// nullable subexpressions. (The latter avoids quadratic blowup in transitions
+// in the worst case.) It might be best thought of as Alt instruction elision.
+//
+// In conceptual terms, it divides the Prog into "trees" of instructions, then
+// traverses the "trees" in order to produce "lists" of instructions. A "tree"
+// is one or more instructions that grow from one "root" instruction to one or
+// more "leaf" instructions; if a "tree" has exactly one instruction, then the
+// "root" is also the "leaf". In most cases, a "root" is the successor of some
+// "leaf" (i.e. the "leaf" instruction's out() returns the "root" instruction)
+// and is considered a "successor root". A "leaf" can be a ByteRange, Capture,
+// EmptyWidth or Match instruction. However, this is insufficient for handling
+// nested nullable subexpressions correctly, so in some cases, a "root" is the
+// dominator of the instructions reachable from some "successor root" (i.e. it
+// has an unreachable predecessor) and is considered a "dominator root". Since
+// only Alt instructions can be "dominator roots" (other instructions would be
+// "leaves"), only Alt instructions are required to be marked as predecessors.
+//
+// Dividing the Prog into "trees" comprises two passes: marking the "successor
+// roots" and the predecessors; and marking the "dominator roots". Sorting the
+// "successor roots" by their bytecode offsets enables iteration in order from
+// greatest to least during the second pass; by working backwards in this case
+// and flooding the graph no further than "leaves" and already marked "roots",
+// it becomes possible to mark "dominator roots" without doing excessive work.
+//
+// Traversing the "trees" is just iterating over the "roots" in order of their
+// marking and flooding the graph no further than "leaves" and "roots". When a
+// "leaf" is reached, the instruction is copied with its successor remapped to
+// its "root" number. When a "root" is reached, a Nop instruction is generated
+// with its successor remapped similarly. As each "list" is produced, its last
+// instruction is marked as such. After all of the "lists" have been produced,
+// a pass over their instructions remaps their successors to bytecode offsets.
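+//
+// Illustrative sketch (the exact compiled form is an assumption): for a
+// pattern like "x(?:y|z)", the out() of the ByteRange for 'x' is an Alt
+// choosing between 'y' and 'z', so that Alt becomes a "successor root";
+// emitting its "list" elides the Alt and produces the ByteRange
+// instructions for 'y' and 'z' back to back, with the final one marked as
+// last().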
+void Prog::Flatten() {
+  if (did_flatten_)
+    return;
+  did_flatten_ = true;
+
+  // Scratch structures. It's important that these are reused by functions
+  // that we call in loops because they would thrash the heap otherwise.
+  SparseSet reachable(size());
+  std::vector<int> stk;
+  stk.reserve(size());
+
+  // First pass: Marks "successor roots" and predecessors.
+  // Builds the mapping from inst-ids to root-ids.
+  SparseArray<int> rootmap(size());
+  SparseArray<int> predmap(size());
+  std::vector<std::vector<int>> predvec;
+  MarkSuccessors(&rootmap, &predmap, &predvec, &reachable, &stk);
+
+  // Second pass: Marks "dominator roots".
+  SparseArray<int> sorted(rootmap);
+  std::sort(sorted.begin(), sorted.end(), sorted.less);
+  for (SparseArray<int>::const_iterator i = sorted.end() - 1;
+       i != sorted.begin();
+       --i) {
+    if (i->index() != start_unanchored() && i->index() != start())
+      MarkDominator(i->index(), &rootmap, &predmap, &predvec, &reachable, &stk);
+  }
+
+  // Third pass: Emits "lists". Remaps outs to root-ids.
+  // Builds the mapping from root-ids to flat-ids.
+  std::vector<int> flatmap(rootmap.size());
+  std::vector<Inst> flat;
+  flat.reserve(size());
+  for (SparseArray<int>::const_iterator i = rootmap.begin();
+       i != rootmap.end();
+       ++i) {
+    flatmap[i->value()] = static_cast<int>(flat.size());
+    EmitList(i->index(), &rootmap, &flat, &reachable, &stk);
+    flat.back().set_last();
+  }
+
+  list_count_ = static_cast<int>(flatmap.size());
+  for (int i = 0; i < kNumInst; i++)
+    inst_count_[i] = 0;
+
+  // Fourth pass: Remaps outs to flat-ids.
+  // Counts instructions by opcode.
+  for (int id = 0; id < static_cast<int>(flat.size()); id++) {
+    Inst* ip = &flat[id];
+    if (ip->opcode() != kInstAltMatch)  // handled in EmitList()
+      ip->set_out(flatmap[ip->out()]);
+    inst_count_[ip->opcode()]++;
+  }
+
+  int total = 0;
+  for (int i = 0; i < kNumInst; i++)
+    total += inst_count_[i];
+  DCHECK_EQ(total, static_cast<int>(flat.size()));
+
+  // Remap start_unanchored and start.
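+  // MarkSuccessors() registered the Fail instruction as root 0, then
+  // start_unanchored and start as roots 1 and 2 (when distinct), which is
+  // why flatmap[1] and flatmap[2] are used below.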
+  if (start_unanchored() == 0) {
+    DCHECK_EQ(start(), 0);
+  } else if (start_unanchored() == start()) {
+    set_start_unanchored(flatmap[1]);
+    set_start(flatmap[1]);
+  } else {
+    set_start_unanchored(flatmap[1]);
+    set_start(flatmap[2]);
+  }
+
+  // Finally, replace the old instructions with the new instructions.
+  size_ = static_cast<int>(flat.size());
+  inst_ = PODArray<Inst>(size_);
+  memmove(inst_.data(), flat.data(), size_*sizeof(inst_[0]));
+}
+
+void Prog::MarkSuccessors(SparseArray<int>* rootmap,
+                          SparseArray<int>* predmap,
+                          std::vector<std::vector<int>>* predvec,
+                          SparseSet* reachable, std::vector<int>* stk) {
+  // Mark the kInstFail instruction.
+  rootmap->set_new(0, rootmap->size());
+
+  // Mark the start_unanchored and start instructions.
+  if (!rootmap->has_index(start_unanchored()))
+    rootmap->set_new(start_unanchored(), rootmap->size());
+  if (!rootmap->has_index(start()))
+    rootmap->set_new(start(), rootmap->size());
+
+  reachable->clear();
+  stk->clear();
+  stk->push_back(start_unanchored());
+  while (!stk->empty()) {
+    int id = stk->back();
+    stk->pop_back();
+  Loop:
+    if (reachable->contains(id))
+      continue;
+    reachable->insert_new(id);
+
+    Inst* ip = inst(id);
+    switch (ip->opcode()) {
+      default:
+        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
+        break;
+
+      case kInstAltMatch:
+      case kInstAlt:
+        // Mark this instruction as a predecessor of each out.
+        for (int out : {ip->out(), ip->out1()}) {
+          if (!predmap->has_index(out)) {
+            predmap->set_new(out, static_cast<int>(predvec->size()));
+            predvec->emplace_back();
+          }
+          (*predvec)[predmap->get_existing(out)].emplace_back(id);
+        }
+        stk->push_back(ip->out1());
+        id = ip->out();
+        goto Loop;
+
+      case kInstByteRange:
+      case kInstCapture:
+      case kInstEmptyWidth:
+        // Mark the out of this instruction as a "root".
+        if (!rootmap->has_index(ip->out()))
+          rootmap->set_new(ip->out(), rootmap->size());
+        id = ip->out();
+        goto Loop;
+
+      case kInstNop:
+        id = ip->out();
+        goto Loop;
+
+      case kInstMatch:
+      case kInstFail:
+        break;
+    }
+  }
+}
+
+void Prog::MarkDominator(int root, SparseArray<int>* rootmap,
+                         SparseArray<int>* predmap,
+                         std::vector<std::vector<int>>* predvec,
+                         SparseSet* reachable, std::vector<int>* stk) {
+  reachable->clear();
+  stk->clear();
+  stk->push_back(root);
+  while (!stk->empty()) {
+    int id = stk->back();
+    stk->pop_back();
+  Loop:
+    if (reachable->contains(id))
+      continue;
+    reachable->insert_new(id);
+
+    if (id != root && rootmap->has_index(id)) {
+      // We reached another "tree" via epsilon transition.
+      continue;
+    }
+
+    Inst* ip = inst(id);
+    switch (ip->opcode()) {
+      default:
+        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
+        break;
+
+      case kInstAltMatch:
+      case kInstAlt:
+        stk->push_back(ip->out1());
+        id = ip->out();
+        goto Loop;
+
+      case kInstByteRange:
+      case kInstCapture:
+      case kInstEmptyWidth:
+        break;
+
+      case kInstNop:
+        id = ip->out();
+        goto Loop;
+
+      case kInstMatch:
+      case kInstFail:
+        break;
+    }
+  }
+
+  for (SparseSet::const_iterator i = reachable->begin();
+       i != reachable->end();
+       ++i) {
+    int id = *i;
+    if (predmap->has_index(id)) {
+      for (int pred : (*predvec)[predmap->get_existing(id)]) {
+        if (!reachable->contains(pred)) {
+          // id has a predecessor that cannot be reached from root!
+          // Therefore, id must be a "root" too - mark it as such.
+          if (!rootmap->has_index(id))
+            rootmap->set_new(id, rootmap->size());
+        }
+      }
+    }
+  }
+}
+
+void Prog::EmitList(int root, SparseArray<int>* rootmap,
+                    std::vector<Inst>* flat,
+                    SparseSet* reachable, std::vector<int>* stk) {
+  reachable->clear();
+  stk->clear();
+  stk->push_back(root);
+  while (!stk->empty()) {
+    int id = stk->back();
+    stk->pop_back();
+  Loop:
+    if (reachable->contains(id))
+      continue;
+    reachable->insert_new(id);
+
+    if (id != root && rootmap->has_index(id)) {
+      // We reached another "tree" via epsilon transition. Emit a kInstNop
+      // instruction so that the Prog does not become quadratically larger.
+      flat->emplace_back();
+      flat->back().set_opcode(kInstNop);
+      flat->back().set_out(rootmap->get_existing(id));
+      continue;
+    }
+
+    Inst* ip = inst(id);
+    switch (ip->opcode()) {
+      default:
+        LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
+        break;
+
+      case kInstAltMatch:
+        flat->emplace_back();
+        flat->back().set_opcode(kInstAltMatch);
+        flat->back().set_out(static_cast<int>(flat->size()));
+        flat->back().out1_ = static_cast<uint32_t>(flat->size())+1;
+        FALLTHROUGH_INTENDED;
+
+      case kInstAlt:
+        stk->push_back(ip->out1());
+        id = ip->out();
+        goto Loop;
+
+      case kInstByteRange:
+      case kInstCapture:
+      case kInstEmptyWidth:
+        flat->emplace_back();
+        memmove(&flat->back(), ip, sizeof *ip);
+        flat->back().set_out(rootmap->get_existing(ip->out()));
+        break;
+
+      case kInstNop:
+        id = ip->out();
+        goto Loop;
+
+      case kInstMatch:
+      case kInstFail:
+        flat->emplace_back();
+        memmove(&flat->back(), ip, sizeof *ip);
+        break;
+    }
   }
 }
 
 }  // namespace re2
-
diff --git a/re2/prog.h b/re2/prog.h
index 2cf65bc..268ab9d 100644
--- a/re2/prog.h
+++ b/re2/prog.h
@@ -2,50 +2,29 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#ifndef RE2_PROG_H_
+#define RE2_PROG_H_
+
 // Compiled representation of regular expressions.
 // See regexp.h for the Regexp class, which represents a regular
 // expression symbolically.
 
-#ifndef RE2_PROG_H__
-#define RE2_PROG_H__
+#include <stdint.h>
+#include <functional>
+#include <mutex>
+#include <string>
+#include <vector>
+#include <type_traits>
 
 #include "util/util.h"
+#include "util/logging.h"
+#include "util/pod_array.h"
+#include "util/sparse_array.h"
+#include "util/sparse_set.h"
 #include "re2/re2.h"
 
 namespace re2 {
 
-// Simple fixed-size bitmap.
-template<int Bits>
-class Bitmap {
- public:
-  Bitmap() { Reset(); }
-  int Size() { return Bits; }
-
-  void Reset() {
-    for (int i = 0; i < Words; i++)
-      w_[i] = 0;
-  }
-  bool Get(int k) const {
-    return w_[k >> WordLog] & (1<<(k & 31));
-  }
-  void Set(int k) {
-    w_[k >> WordLog] |= 1<<(k & 31);
-  }
-  void Clear(int k) {
-    w_[k >> WordLog] &= ~(1<<(k & 31));
-  }
-  uint32 Word(int i) const {
-    return w_[i];
-  }
-
- private:
-  static const int WordLog = 5;
-  static const int Words = (Bits+31)/32;
-  uint32 w_[Words];
-  DISALLOW_EVIL_CONSTRUCTORS(Bitmap);
-};
-
-
 // Opcodes for Inst
 enum InstOp {
   kInstAlt = 0,      // choose between out_ and out1_
@@ -56,6 +35,7 @@
   kInstMatch,        // found a match!
   kInstNop,          // no-op; occasionally unavoidable
   kInstFail,         // never match; occasionally unavoidable
+  kNumInst,
 };
 
 // Bit flags for empty-width specials
@@ -69,10 +49,8 @@
   kEmptyAllFlags         = (1<<6)-1,
 };
 
-class Regexp;
-
 class DFA;
-struct OneState;
+class Regexp;
 
 // Compiled form of regexp program.
 class Prog {
@@ -83,31 +61,40 @@
   // Single instruction in regexp program.
   class Inst {
    public:
-    Inst() : out_opcode_(0), out1_(0) { }
+    // See the assertion below for why this is so.
+    Inst() = default;
+
+    // Copyable.
+    Inst(const Inst&) = default;
+    Inst& operator=(const Inst&) = default;
 
     // Constructors per opcode
-    void InitAlt(uint32 out, uint32 out1);
-    void InitByteRange(int lo, int hi, int foldcase, uint32 out);
-    void InitCapture(int cap, uint32 out);
-    void InitEmptyWidth(EmptyOp empty, uint32 out);
+    void InitAlt(uint32_t out, uint32_t out1);
+    void InitByteRange(int lo, int hi, int foldcase, uint32_t out);
+    void InitCapture(int cap, uint32_t out);
+    void InitEmptyWidth(EmptyOp empty, uint32_t out);
     void InitMatch(int id);
-    void InitNop(uint32 out);
+    void InitNop(uint32_t out);
     void InitFail();
 
     // Getters
-    int id(Prog* p) { return this - p->inst_; }
+    int id(Prog* p) { return static_cast<int>(this - p->inst_.data()); }
     InstOp opcode() { return static_cast<InstOp>(out_opcode_&7); }
-    int out()     { return out_opcode_>>3; }
-    int out1()    { DCHECK(opcode() == kInstAlt || opcode() == kInstAltMatch); return out1_; }
+    int last()      { return (out_opcode_>>3)&1; }
+    int out()       { return out_opcode_>>4; }
+    int out1()      { DCHECK(opcode() == kInstAlt || opcode() == kInstAltMatch); return out1_; }
     int cap()       { DCHECK_EQ(opcode(), kInstCapture); return cap_; }
     int lo()        { DCHECK_EQ(opcode(), kInstByteRange); return lo_; }
     int hi()        { DCHECK_EQ(opcode(), kInstByteRange); return hi_; }
     int foldcase()  { DCHECK_EQ(opcode(), kInstByteRange); return foldcase_; }
     int match_id()  { DCHECK_EQ(opcode(), kInstMatch); return match_id_; }
     EmptyOp empty() { DCHECK_EQ(opcode(), kInstEmptyWidth); return empty_; }
-    bool greedy(Prog *p) {
+
+    bool greedy(Prog* p) {
       DCHECK_EQ(opcode(), kInstAltMatch);
-      return p->inst(out())->opcode() == kInstByteRange;
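+      // Look through a single Nop when checking whether the preferred arm
+      // begins with a ByteRange (plausibly needed once Flatten() introduces
+      // Nop instructions; that connection is an assumption).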
+      return p->inst(out())->opcode() == kInstByteRange ||
+             (p->inst(out())->opcode() == kInstNop &&
+              p->inst(p->inst(out())->out())->opcode() == kInstByteRange);
     }
 
     // Does this inst (an kInstByteRange) match c?
@@ -122,54 +109,61 @@
     string Dump();
 
     // Maximum instruction id.
-    // (Must fit in out_opcode_, and PatchList steals another bit.)
+    // (Must fit in out_opcode_. PatchList/last steal another bit.)
     static const int kMaxInst = (1<<28) - 1;
 
    private:
     void set_opcode(InstOp opcode) {
-      out_opcode_ = (out()<<3) | opcode;
+      out_opcode_ = (out()<<4) | (last()<<3) | opcode;
+    }
+
+    void set_last() {
+      out_opcode_ = (out()<<4) | (1<<3) | opcode();
     }
 
     void set_out(int out) {
-      out_opcode_ = (out<<3) | opcode();
+      out_opcode_ = (out<<4) | (last()<<3) | opcode();
     }
 
     void set_out_opcode(int out, InstOp opcode) {
-      out_opcode_ = (out<<3) | opcode;
+      out_opcode_ = (out<<4) | (last()<<3) | opcode;
     }
 
-    uint32 out_opcode_;  // 29 bits of out, 3 (low) bits opcode
-    union {              // additional instruction arguments:
-      uint32 out1_;      // opcode == kInstAlt
-                         //   alternate next instruction
+    uint32_t out_opcode_;   // 28 bits: out, 1 bit: last, 3 (low) bits: opcode
+    union {                 // additional instruction arguments:
+      uint32_t out1_;       // opcode == kInstAlt
+                            //   alternate next instruction
 
-      int32 cap_;        // opcode == kInstCapture
-                         //   Index of capture register (holds text
-                         //   position recorded by capturing parentheses).
-                         //   For \n (the submatch for the nth parentheses),
-                         //   the left parenthesis captures into register 2*n
-                         //   and the right one captures into register 2*n+1.
+      int32_t cap_;         // opcode == kInstCapture
+                            //   Index of capture register (holds text
+                            //   position recorded by capturing parentheses).
+                            //   For \n (the submatch for the nth parentheses),
+                            //   the left parenthesis captures into register 2*n
+                            //   and the right one captures into register 2*n+1.
 
-      int32 match_id_;   // opcode == kInstMatch
-                         //   Match ID to identify this match (for re2::Set).
+      int32_t match_id_;    // opcode == kInstMatch
+                            //   Match ID to identify this match (for re2::Set).
 
-      struct {           // opcode == kInstByteRange
-        uint8 lo_;       //   byte range is lo_-hi_ inclusive
-        uint8 hi_;       //
-        uint8 foldcase_; //   convert A-Z to a-z before checking range.
+      struct {              // opcode == kInstByteRange
+        uint8_t lo_;        //   byte range is lo_-hi_ inclusive
+        uint8_t hi_;        //
+        uint8_t foldcase_;  //   convert A-Z to a-z before checking range.
       };
 
-      EmptyOp empty_;    // opcode == kInstEmptyWidth
-                         //   empty_ is bitwise OR of kEmpty* flags above.
+      EmptyOp empty_;       // opcode == kInstEmptyWidth
+                            //   empty_ is bitwise OR of kEmpty* flags above.
     };
 
     friend class Compiler;
     friend struct PatchList;
     friend class Prog;
-
-    DISALLOW_EVIL_CONSTRUCTORS(Inst);
   };
 
+  // Inst must be trivial so that we can freely clear it with memset(3).
+  // Arrays of Inst are initialised by copying the initial elements with
+  // memmove(3) and then clearing any remaining elements with memset(3).
+  static_assert(std::is_trivial<Inst>::value, "Inst must be trivial");
+
   // Whether to anchor the search.
   enum Anchor {
     kUnanchored,  // match anywhere
@@ -200,13 +194,13 @@
   int start_unanchored() { return start_unanchored_; }
   void set_start(int start) { start_ = start; }
   void set_start_unanchored(int start) { start_unanchored_ = start; }
-  int64 size() { return size_; }
+  int size() { return size_; }
   bool reversed() { return reversed_; }
   void set_reversed(bool reversed) { reversed_ = reversed; }
-  int64 byte_inst_count() { return byte_inst_count_; }
-  const Bitmap<256>& byterange() { return byterange_; }
-  void set_dfa_mem(int64 dfa_mem) { dfa_mem_ = dfa_mem; }
-  int64 dfa_mem() { return dfa_mem_; }
+  int list_count() { return list_count_; }
+  int inst_count(InstOp op) { return inst_count_[op]; }
+  void set_dfa_mem(int64_t dfa_mem) { dfa_mem_ = dfa_mem; }
+  int64_t dfa_mem() { return dfa_mem_; }
   int flags() { return flags_; }
   void set_flags(int flags) { flags_ = flags; }
   bool anchor_start() { return anchor_start_; }
@@ -214,21 +208,19 @@
   bool anchor_end() { return anchor_end_; }
   void set_anchor_end(bool b) { anchor_end_ = b; }
   int bytemap_range() { return bytemap_range_; }
-  const uint8* bytemap() { return bytemap_; }
+  const uint8_t* bytemap() { return bytemap_; }
+
+  // Lazily computed.
+  int first_byte();
 
   // Returns string representation of program for debugging.
   string Dump();
   string DumpUnanchored();
-
-  // Record that at some point in the prog, the bytes in the range
-  // lo-hi (inclusive) are treated as different from bytes outside the range.
-  // Tracking this lets the DFA collapse commonly-treated byte ranges
-  // when recording state pointers, greatly reducing its memory footprint.
-  void MarkByteRange(int lo, int hi);
+  string DumpByteMap();
 
   // Returns the set of kEmpty flags that are in effect at
   // position p within context.
-  static uint32 EmptyFlags(const StringPiece& context, const char* p);
+  static uint32_t EmptyFlags(const StringPiece& context, const char* p);
 
   // Returns whether byte c is a word character: ASCII only.
   // Used by the implementation of \b and \B.
@@ -237,7 +229,7 @@
   //     (the DFA has only one-byte lookahead).
   //   - even if the lookahead were possible, the Progs would be huge.
   // This crude approximation is the same one PCRE uses.
-  static bool IsWordChar(uint8 c) {
+  static bool IsWordChar(uint8_t c) {
     return ('A' <= c && c <= 'Z') ||
            ('a' <= c && c <= 'z') ||
            ('0' <= c && c <= '9') ||
@@ -270,19 +262,37 @@
   // If matches != NULL and kind == kManyMatch and there is a match,
   // SearchDFA fills matches with the match IDs of the final matching state.
   bool SearchDFA(const StringPiece& text, const StringPiece& context,
-                 Anchor anchor, MatchKind kind,
-                 StringPiece* match0, bool* failed,
-                 vector<int>* matches);
+                 Anchor anchor, MatchKind kind, StringPiece* match0,
+                 bool* failed, SparseSet* matches);
 
-  // Build the entire DFA for the given match kind.  FOR TESTING ONLY.
+  // The callback issued after building each DFA state with BuildEntireDFA().
+  // If next is null, then the memory budget has been exhausted and building
+  // will halt. Otherwise, the state has been built and next points to an array
+  // of bytemap_range()+1 slots holding the next states as per the bytemap and
+  // kByteEndText. The number of the state is implied by the callback sequence:
+  // the first callback is for state 0, the second callback is for state 1, ...
+  // match indicates whether the state is a matching state.
+  using DFAStateCallback = std::function<void(const int* next, bool match)>;
+
+  // Build the entire DFA for the given match kind.
   // Usually the DFA is built out incrementally, as needed, which
-  // avoids lots of unnecessary work.  This function is useful only
-  // for testing purposes.  Returns number of states.
-  int BuildEntireDFA(MatchKind kind);
+  // avoids lots of unnecessary work.
+  // If cb is not empty, it receives one callback per state built.
+  // Returns the number of states built.
+  // FOR TESTING OR EXPERIMENTAL PURPOSES ONLY.
+  int BuildEntireDFA(MatchKind kind, const DFAStateCallback& cb);
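For example, a caller could count states and matching states through the callback. This is only a sketch against the declaration above; BuildEntireDFA is a testing-only Prog method, and the prog pointer is assumed to come from the compiler:

    // Sketch, assuming a Prog* named prog.
    int matching_states = 0;
    int total_states = prog->BuildEntireDFA(
        Prog::kLongestMatch,
        [&matching_states](const int* next, bool match) {
          if (next == NULL)
            return;  // memory budget exhausted; building will halt
          if (match)
            matching_states++;
        });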
 
-  // Compute byte map.
+  // Controls whether the DFA should bail out early if the NFA would be faster.
+  // FOR TESTING ONLY.
+  static void TEST_dfa_should_bail_when_slow(bool b);
+
+  // Compute bytemap.
   void ComputeByteMap();
 
+  // Computes whether all matches must begin with the same first
+  // byte, and if so, returns that byte.  If not, returns -1.
+  int ComputeFirstByte();
+
   // Run peep-hole optimizer on program.
   void Optimize();
 
@@ -329,48 +339,80 @@
   // Returns true on success, false on error.
   bool PossibleMatchRange(string* min, string* max, int maxlen);
 
+  // EXPERIMENTAL! SUBJECT TO CHANGE!
+  // Outputs the program fanout into the given sparse array.
+  void Fanout(SparseArray<int>* fanout);
+
   // Compiles a collection of regexps to Prog.  Each regexp will have
-  // its own Match instruction recording the index in the vector.
-  static Prog* CompileSet(const RE2::Options& options, RE2::Anchor anchor,
-                          Regexp* re);
+  // its own Match instruction recording the index in the output vector.
+  static Prog* CompileSet(Regexp* re, RE2::Anchor anchor, int64_t max_mem);
+
+  // Flattens the Prog from "tree" form to "list" form. This is an in-place
+  // operation in the sense that the old instructions are lost.
+  void Flatten();
+
+  // Walks the Prog; the "successor roots" or predecessors of the reachable
+  // instructions are marked in rootmap or predmap/predvec, respectively.
+  // reachable and stk are preallocated scratch structures.
+  void MarkSuccessors(SparseArray<int>* rootmap,
+                      SparseArray<int>* predmap,
+                      std::vector<std::vector<int>>* predvec,
+                      SparseSet* reachable, std::vector<int>* stk);
+
+  // Walks the Prog from the given "root" instruction; the "dominator root"
+  // of the reachable instructions (if such exists) is marked in rootmap.
+  // reachable and stk are preallocated scratch structures.
+  void MarkDominator(int root, SparseArray<int>* rootmap,
+                     SparseArray<int>* predmap,
+                     std::vector<std::vector<int>>* predvec,
+                     SparseSet* reachable, std::vector<int>* stk);
+
+  // Walks the Prog from the given "root" instruction; the reachable
+  // instructions are emitted in "list" form and appended to flat.
+  // reachable and stk are preallocated scratch structures.
+  void EmitList(int root, SparseArray<int>* rootmap,
+                std::vector<Inst>* flat,
+                SparseSet* reachable, std::vector<int>* stk);
 
  private:
   friend class Compiler;
 
   DFA* GetDFA(MatchKind kind);
+  void DeleteDFA(DFA* dfa);
 
   bool anchor_start_;       // regexp has explicit start anchor
   bool anchor_end_;         // regexp has explicit end anchor
   bool reversed_;           // whether program runs backward over input
+  bool did_flatten_;        // has Flatten been called?
   bool did_onepass_;        // has IsOnePass been called?
 
   int start_;               // entry point for program
   int start_unanchored_;    // unanchored entry point for program
   int size_;                // number of instructions
-  int byte_inst_count_;     // number of kInstByteRange instructions
   int bytemap_range_;       // bytemap_[x] < bytemap_range_
+  int first_byte_;          // required first byte for match, or -1 if none
   int flags_;               // regexp parse flags
-  int onepass_statesize_;   // byte size of each OneState* node
 
-  Inst* inst_;              // pointer to instruction array
+  int list_count_;            // count of lists (see above)
+  int inst_count_[kNumInst];  // count of instructions by opcode
 
-  Mutex dfa_mutex_;    // Protects dfa_first_, dfa_longest_
-  DFA* volatile dfa_first_;     // DFA cached for kFirstMatch
-  DFA* volatile dfa_longest_;   // DFA cached for kLongestMatch and kFullMatch
-  int64 dfa_mem_;      // Maximum memory for DFAs.
-  void (*delete_dfa_)(DFA* dfa);
+  PODArray<Inst> inst_;     // pointer to instruction array
+  uint8_t* onepass_nodes_;  // data for OnePass nodes
 
-  Bitmap<256> byterange_;    // byterange.Get(x) true if x ends a
-                             // commonly-treated byte range.
-  uint8 bytemap_[256];       // map from input bytes to byte classes
-  uint8 *unbytemap_;         // bytemap_[unbytemap_[x]] == x
+  int64_t dfa_mem_;         // Maximum memory for DFAs.
+  DFA* dfa_first_;          // DFA cached for kFirstMatch/kManyMatch
+  DFA* dfa_longest_;        // DFA cached for kLongestMatch/kFullMatch
 
-  uint8* onepass_nodes_;     // data for OnePass nodes
-  OneState* onepass_start_;  // start node for OnePass program
+  uint8_t bytemap_[256];    // map from input bytes to byte classes
 
-  DISALLOW_EVIL_CONSTRUCTORS(Prog);
+  std::once_flag first_byte_once_;
+  std::once_flag dfa_first_once_;
+  std::once_flag dfa_longest_once_;
+
+  Prog(const Prog&) = delete;
+  Prog& operator=(const Prog&) = delete;
 };
 
 }  // namespace re2
 
-#endif  // RE2_PROG_H__
+#endif  // RE2_PROG_H_
diff --git a/re2/re2.cc b/re2/re2.cc
index 8d1d468..1529807 100644
--- a/re2/re2.cc
+++ b/re2/re2.cc
@@ -9,32 +9,34 @@
 
 #include "re2/re2.h"
 
-#include <stdio.h>
-#include <string>
-#include <pthread.h>
+#include <assert.h>
+#include <ctype.h>
 #include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <iterator>
+#include <mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
 #include "util/util.h"
-#include "util/flags.h"
+#include "util/logging.h"
+#include "util/sparse_array.h"
+#include "util/strutil.h"
+#include "util/utf.h"
 #include "re2/prog.h"
 #include "re2/regexp.h"
 
-DEFINE_bool(trace_re2, false, "trace RE2 execution");
-
 namespace re2 {
 
 // Maximum number of args we can set
 static const int kMaxArgs = 16;
 static const int kVecSize = 1+kMaxArgs;
 
-const VariadicFunction2<bool, const StringPiece&, const RE2&, RE2::Arg, RE2::FullMatchN> RE2::FullMatch;
-const VariadicFunction2<bool, const StringPiece&, const RE2&, RE2::Arg, RE2::PartialMatchN> RE2::PartialMatch;
-const VariadicFunction2<bool, StringPiece*, const RE2&, RE2::Arg, RE2::ConsumeN> RE2::Consume;
-const VariadicFunction2<bool, StringPiece*, const RE2&, RE2::Arg, RE2::FindAndConsumeN> RE2::FindAndConsume;
-
-// This will trigger LNK2005 error in MSVC.
-#ifndef COMPILER_MSVC
 const int RE2::Options::kDefaultMaxMem;  // initialized in re2.h
-#endif  // COMPILER_MSVC
 
 RE2::Options::Options(RE2::CannedOptions opt)
   : encoding_(opt == RE2::Latin1 ? EncodingLatin1 : EncodingUTF8),
@@ -44,6 +46,7 @@
     max_mem_(kDefaultMaxMem),
     literal_(false),
     never_nl_(false),
+    dot_nl_(false),
     never_capture_(false),
     case_sensitive_(true),
     perl_classes_(false),
@@ -51,22 +54,11 @@
     one_line_(false) {
 }
 
-// static empty things for use as const references.
-// To avoid global constructors, initialized on demand.
-GLOBAL_MUTEX(empty_mutex);
-static const string *empty_string;
-static const map<string, int> *empty_named_groups;
-static const map<int, string> *empty_group_names;
-
-static void InitEmpty() {
-  GLOBAL_MUTEX_LOCK(empty_mutex);
-  if (empty_string == NULL) {
-    empty_string = new string;
-    empty_named_groups = new map<string, int>;
-    empty_group_names = new map<int, string>;
-  }
-  GLOBAL_MUTEX_UNLOCK(empty_mutex);
-}
+// static empty objects for use as const references.
+// To avoid global constructors, allocated in RE2::Init().
+static const string* empty_string;
+static const std::map<string, int>* empty_named_groups;
+static const std::map<int, string>* empty_group_names;
 
 // Converts from Regexp error code to RE2 error code.
 // Maybe some day they will diverge.  In any event, this
@@ -107,8 +99,8 @@
 
 static string trunc(const StringPiece& pattern) {
   if (pattern.size() < 100)
-    return pattern.as_string();
-  return pattern.substr(0, 100).as_string() + "...";
+    return string(pattern);
+  return string(pattern.substr(0, 100)) + "...";
 }
 
 
@@ -151,6 +143,9 @@
   if (never_nl())
     flags |= Regexp::NeverNL;
 
+  if (dot_nl())
+    flags |= Regexp::DotNL;
+
   if (never_capture())
     flags |= Regexp::NeverCapture;
 
@@ -170,19 +165,24 @@
 }
 
 void RE2::Init(const StringPiece& pattern, const Options& options) {
-  mutex_ = new Mutex;
-  pattern_ = pattern.as_string();
+  static std::once_flag empty_once;
+  std::call_once(empty_once, []() {
+    empty_string = new string;
+    empty_named_groups = new std::map<string, int>;
+    empty_group_names = new std::map<int, string>;
+  });
+
+  pattern_ = string(pattern);
   options_.Copy(options);
-  InitEmpty();
+  entire_regexp_ = NULL;
+  suffix_regexp_ = NULL;
+  prog_ = NULL;
+  num_captures_ = -1;
+  rprog_ = NULL;
   error_ = empty_string;
   error_code_ = NoError;
-  suffix_regexp_ = NULL;
-  entire_regexp_ = NULL;
-  prog_ = NULL;
-  rprog_ = NULL;
   named_groups_ = NULL;
   group_names_ = NULL;
-  num_captures_ = -1;
 
   RegexpStatus status;
   entire_regexp_ = Regexp::Parse(
@@ -190,19 +190,16 @@
     static_cast<Regexp::ParseFlags>(options_.ParseFlags()),
     &status);
   if (entire_regexp_ == NULL) {
-    if (error_ == empty_string)
-      error_ = new string(status.Text());
     if (options_.log_errors()) {
       LOG(ERROR) << "Error parsing '" << trunc(pattern_) << "': "
                  << status.Text();
     }
-    error_arg_ = status.error_arg().as_string();
+    error_ = new string(status.Text());
     error_code_ = RegexpErrorToRE2(status.code());
+    error_arg_ = string(status.error_arg());
     return;
   }
 
-  prefix_.clear();
-  prefix_foldcase_ = false;
   re2::Regexp* suffix;
   if (entire_regexp_->RequiredPrefix(&prefix_, &prefix_foldcase_, &suffix))
     suffix_regexp_ = suffix;
@@ -221,6 +218,11 @@
     return;
   }
 
+  // We used to compute this lazily, but it's used during the
+  // typical control flow for a match call, so we now compute
+  // it eagerly, which avoids the overhead of std::once_flag.
+  num_captures_ = suffix_regexp_->NumCaptures();
+
   // Could delay this until the first match call that
   // cares about submatch information, but the one-pass
   // machine's memory gets cut from the DFA memory budget,
@@ -231,17 +233,16 @@
 
 // Returns rprog_, computing it if needed.
 re2::Prog* RE2::ReverseProg() const {
-  MutexLock l(mutex_);
-  if (rprog_ == NULL && error_ == empty_string) {
-    rprog_ = suffix_regexp_->CompileToReverseProg(options_.max_mem()/3);
-    if (rprog_ == NULL) {
-      if (options_.log_errors())
-        LOG(ERROR) << "Error reverse compiling '" << trunc(pattern_) << "'";
-      error_ = new string("pattern too large - reverse compile failed");
-      error_code_ = RE2::ErrorPatternTooLarge;
-      return NULL;
+  std::call_once(rprog_once_, [](const RE2* re) {
+    re->rprog_ =
+        re->suffix_regexp_->CompileToReverseProg(re->options_.max_mem() / 3);
+    if (re->rprog_ == NULL) {
+      if (re->options_.log_errors())
+        LOG(ERROR) << "Error reverse compiling '" << trunc(re->pattern_) << "'";
+      re->error_ = new string("pattern too large - reverse compile failed");
+      re->error_code_ = RE2::ErrorPatternTooLarge;
     }
-  }
+  }, this);
   return rprog_;
 }
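The same once-per-object pattern recurs below for named_groups_, group_names_ and the cached DFAs: a std::once_flag member plus std::call_once with a capture-free lambda that receives the object pointer as an extra argument. A stripped-down sketch of the idiom with a hypothetical type (not RE2 code):

    #include <mutex>

    struct Lazy {
      int Value() const {
        std::call_once(once_, [](const Lazy* self) {
          self->value_ = Compute();  // runs at most once, even under contention
        }, this);
        return value_;
      }
     private:
      static int Compute() { return 42; }  // stand-in for the expensive step
      mutable std::once_flag once_;
      mutable int value_ = 0;
    };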
 
@@ -250,7 +251,6 @@
     suffix_regexp_->Decref();
   if (entire_regexp_)
     entire_regexp_->Decref();
-  delete mutex_;
   delete prog_;
   delete rprog_;
   if (error_ != empty_string)
@@ -267,29 +267,64 @@
   return prog_->size();
 }
 
-// Returns named_groups_, computing it if needed.
-const map<string, int>&  RE2::NamedCapturingGroups() const {
-  MutexLock l(mutex_);
-  if (!ok())
-    return *empty_named_groups;
-  if (named_groups_ == NULL) {
-    named_groups_ = suffix_regexp_->NamedCaptures();
-    if (named_groups_ == NULL)
-      named_groups_ = empty_named_groups;
+int RE2::ReverseProgramSize() const {
+  if (prog_ == NULL)
+    return -1;
+  Prog* prog = ReverseProg();
+  if (prog == NULL)
+    return -1;
+  return prog->size();
+}
+
+static int Fanout(Prog* prog, std::map<int, int>* histogram) {
+  SparseArray<int> fanout(prog->size());
+  prog->Fanout(&fanout);
+  histogram->clear();
+  for (SparseArray<int>::iterator i = fanout.begin(); i != fanout.end(); ++i) {
+    // TODO(junyer): Optimise this?
+    int bucket = 0;
+    while (1 << bucket < i->value()) {
+      bucket++;
+    }
+    (*histogram)[bucket]++;
   }
+  return histogram->rbegin()->first;
+}
+
+int RE2::ProgramFanout(std::map<int, int>* histogram) const {
+  if (prog_ == NULL)
+    return -1;
+  return Fanout(prog_, histogram);
+}
+
+int RE2::ReverseProgramFanout(std::map<int, int>* histogram) const {
+  if (prog_ == NULL)
+    return -1;
+  Prog* prog = ReverseProg();
+  if (prog == NULL)
+    return -1;
+  return Fanout(prog, histogram);
+}
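The static helper buckets each instruction's fanout f into the smallest b with (1 << b) >= f and returns the largest bucket that occurs. A hedged usage sketch of the experimental public wrappers (the pattern is arbitrary):

    RE2 re("(foo|bar)+baz");
    std::map<int, int> histogram;
    int largest_bucket = re.ProgramFanout(&histogram);  // -1 if re failed to compile
    // histogram[b] counts instructions whose fanout rounds up to 1 << b.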
+
+// Returns named_groups_, computing it if needed.
+const std::map<string, int>& RE2::NamedCapturingGroups() const {
+  std::call_once(named_groups_once_, [](const RE2* re) {
+    if (re->suffix_regexp_ != NULL)
+      re->named_groups_ = re->suffix_regexp_->NamedCaptures();
+    if (re->named_groups_ == NULL)
+      re->named_groups_ = empty_named_groups;
+  }, this);
   return *named_groups_;
 }
 
 // Returns group_names_, computing it if needed.
-const map<int, string>&  RE2::CapturingGroupNames() const {
-  MutexLock l(mutex_);
-  if (!ok())
-    return *empty_group_names;
-  if (group_names_ == NULL) {
-    group_names_ = suffix_regexp_->CaptureNames();
-    if (group_names_ == NULL)
-      group_names_ = empty_group_names;
-  }
+const std::map<int, string>& RE2::CapturingGroupNames() const {
+  std::call_once(group_names_once_, [](const RE2* re) {
+    if (re->suffix_regexp_ != NULL)
+      re->group_names_ = re->suffix_regexp_->CaptureNames();
+    if (re->group_names_ == NULL)
+      re->group_names_ = empty_group_names;
+  }, this);
   return *group_names_;
 }
 
@@ -307,7 +342,7 @@
 
 bool RE2::ConsumeN(StringPiece* input, const RE2& re,
                    const Arg* const args[], int n) {
-  int consumed;
+  size_t consumed;
   if (re.DoMatch(*input, ANCHOR_START, &consumed, args, n)) {
     input->remove_prefix(consumed);
     return true;
@@ -318,7 +353,7 @@
 
 bool RE2::FindAndConsumeN(StringPiece* input, const RE2& re,
                           const Arg* const args[], int n) {
-  int consumed;
+  size_t consumed;
   if (re.DoMatch(*input, UNANCHORED, &consumed, args, n)) {
     input->remove_prefix(consumed);
     return true;
@@ -327,28 +362,9 @@
   }
 }
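A usage sketch for the Consume/FindAndConsume entry points implemented above; the values in the comment are what the consuming semantics imply:

    re2::StringPiece input("hello world foo");
    std::string word;
    while (RE2::FindAndConsume(&input, "(\\w+)", &word)) {
      // word is "hello", then "world", then "foo";
      // input is advanced past each match.
    }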
 
-// Returns the maximum submatch needed for the rewrite to be done by Replace().
-// E.g. if rewrite == "foo \\2,\\1", returns 2.
-int RE2::MaxSubmatch(const StringPiece& rewrite) {
-  int max = 0;
-  for (const char *s = rewrite.data(), *end = s + rewrite.size();
-       s < end; s++) {
-    if (*s == '\\') {
-      s++;
-      int c = (s < end) ? *s : -1;
-      if (isdigit(c)) {
-        int n = (c - '0');
-        if (n > max)
-          max = n;
-      }
-    }
-  }
-  return max;
-}
-
-bool RE2::Replace(string *str,
-                 const RE2& re,
-                 const StringPiece& rewrite) {
+bool RE2::Replace(string* str,
+                  const RE2& re,
+                  const StringPiece& rewrite) {
   StringPiece vec[kVecSize];
   int nvec = 1 + MaxSubmatch(rewrite);
   if (nvec > arraysize(vec))
@@ -366,9 +382,9 @@
   return true;
 }
 
-int RE2::GlobalReplace(string *str,
-                      const RE2& re,
-                      const StringPiece& rewrite) {
+int RE2::GlobalReplace(string* str,
+                       const RE2& re,
+                       const StringPiece& rewrite) {
   StringPiece vec[kVecSize];
   int nvec = 1 + MaxSubmatch(rewrite);
   if (nvec > arraysize(vec))
@@ -379,13 +395,44 @@
   const char* lastend = NULL;
   string out;
   int count = 0;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+  // Iterate just once when fuzzing. Otherwise, we easily get bogged down
+  // and coverage is unlikely to improve despite significant expense.
+  while (p == str->data()) {
+#else
   while (p <= ep) {
-    if (!re.Match(*str, p - str->data(), str->size(), UNANCHORED, vec, nvec))
+#endif
+    if (!re.Match(*str, static_cast<size_t>(p - str->data()),
+                  str->size(), UNANCHORED, vec, nvec))
       break;
     if (p < vec[0].begin())
       out.append(p, vec[0].begin() - p);
     if (vec[0].begin() == lastend && vec[0].size() == 0) {
       // Disallow empty match at end of last match: skip ahead.
+      //
+      // fullrune() takes int, not size_t. However, it just looks
+      // at the leading byte and treats any length >= 4 the same.
+      if (re.options().encoding() == RE2::Options::EncodingUTF8 &&
+          fullrune(p, static_cast<int>(std::min(static_cast<ptrdiff_t>(4),
+                                                ep - p)))) {
+        // re is in UTF-8 mode and there is enough left of str
+        // to allow us to advance by up to UTFmax bytes.
+        Rune r;
+        int n = chartorune(&r, p);
+        // Some copies of chartorune have a bug that accepts
+        // encodings of values in (10FFFF, 1FFFFF] as valid.
+        if (r > Runemax) {
+          n = 1;
+          r = Runeerror;
+        }
+        if (!(n == 1 && r == Runeerror)) {  // no decoding error
+          out.append(p, n);
+          p += n;
+          continue;
+        }
+      }
+      // Most likely, re is in Latin-1 mode. If it is in UTF-8 mode,
+      // we fell through from above and the GIGO principle applies.
       if (p < ep)
         out.append(p, 1);
       p++;
@@ -402,14 +449,15 @@
 
   if (p < ep)
     out.append(p, ep - p);
+  using std::swap;
   swap(out, *str);
   return count;
 }
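A usage sketch for the public wrapper; the result values are what the replacement rules imply:

    std::string s = "yabba dabba doo";
    int n = RE2::GlobalReplace(&s, "b+", "d");
    // s is now "yada dada doo" and n is 2 (one replacement per run of b's).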
 
-bool RE2::Extract(const StringPiece &text,
-                 const RE2& re,
-                 const StringPiece &rewrite,
-                 string *out) {
+bool RE2::Extract(const StringPiece& text,
+                  const RE2& re,
+                  const StringPiece& rewrite,
+                  string* out) {
   StringPiece vec[kVecSize];
   int nvec = 1 + MaxSubmatch(rewrite);
   if (nvec > arraysize(vec))
@@ -433,7 +481,7 @@
   // that.  (This also makes it identical to the perl function of the
   // same name except for the null-character special case;
   // see `perldoc -f quotemeta`.)
-  for (int ii = 0; ii < unquoted.length(); ++ii) {
+  for (size_t ii = 0; ii < unquoted.size(); ++ii) {
     // Note that using 'isalnum' here raises the benchmark time from
     // 32ns to 58ns:
     if ((unquoted[ii] < 'a' || unquoted[ii] > 'z') &&
@@ -464,19 +512,19 @@
   if (prog_ == NULL)
     return false;
 
-  int n = prefix_.size();
+  int n = static_cast<int>(prefix_.size());
   if (n > maxlen)
     n = maxlen;
 
   // Determine initial min max from prefix_ literal.
-  string pmin, pmax;
-  pmin = prefix_.substr(0, n);
-  pmax = prefix_.substr(0, n);
+  *min = prefix_.substr(0, n);
+  *max = prefix_.substr(0, n);
   if (prefix_foldcase_) {
-    // prefix is ASCII lowercase; change pmin to uppercase.
+    // prefix is ASCII lowercase; change *min to uppercase.
     for (int i = 0; i < n; i++) {
-      if ('a' <= pmin[i] && pmin[i] <= 'z')
-        pmin[i] += 'A' - 'a';
+      char& c = (*min)[i];
+      if ('a' <= c && c <= 'z')
+        c += 'A' - 'a';
     }
   }
 
@@ -484,13 +532,13 @@
   string dmin, dmax;
   maxlen -= n;
   if (maxlen > 0 && prog_->PossibleMatchRange(&dmin, &dmax, maxlen)) {
-    pmin += dmin;
-    pmax += dmax;
-  } else if (pmax.size() > 0) {
+    min->append(dmin);
+    max->append(dmax);
+  } else if (!max->empty()) {
     // prog_->PossibleMatchRange has failed us,
     // but we still have useful information from prefix_.
-    // Round up pmax to allow any possible suffix.
-    pmax = PrefixSuccessor(pmax);
+    // Round up *max to allow any possible suffix.
+    PrefixSuccessor(max);
   } else {
     // Nothing useful.
     *min = "";
@@ -498,19 +546,17 @@
     return false;
   }
 
-  *min = pmin;
-  *max = pmax;
   return true;
 }
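A usage sketch for the public entry point; min and max bound every possible match lexicographically, truncated to at most maxlen bytes:

    RE2 re("abc.*def");
    std::string min, max;
    if (re.PossibleMatchRange(&min, &max, /*maxlen=*/10)) {
      // Every string matched by re compares >= min and <= max.
    }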
 
 // Avoid possible locale nonsense in standard strcasecmp.
 // The string a is known to be all lowercase.
-static int ascii_strcasecmp(const char* a, const char* b, int len) {
-  const char *ae = a + len;
+static int ascii_strcasecmp(const char* a, const char* b, size_t len) {
+  const char* ae = a + len;
 
   for (; a < ae; a++, b++) {
-    uint8 x = *a;
-    uint8 y = *b;
+    uint8_t x = *a;
+    uint8_t y = *b;
     if ('A' <= y && y <= 'Z')
       y += 'a' - 'A';
     if (x != y)
@@ -523,20 +569,23 @@
 /***** Actual matching and rewriting code *****/
 
 bool RE2::Match(const StringPiece& text,
-                int startpos,
-                int endpos,
+                size_t startpos,
+                size_t endpos,
                 Anchor re_anchor,
                 StringPiece* submatch,
                 int nsubmatch) const {
-  if (!ok() || suffix_regexp_ == NULL) {
+  if (!ok()) {
     if (options_.log_errors())
       LOG(ERROR) << "Invalid RE2: " << *error_;
     return false;
   }
 
-  if (startpos < 0 || startpos > endpos || endpos > text.size()) {
+  if (startpos > endpos || endpos > text.size()) {
     if (options_.log_errors())
-      LOG(ERROR) << "RE2: invalid startpos, endpos pair.";
+      LOG(ERROR) << "RE2: invalid startpos, endpos pair. ["
+                 << "startpos: " << startpos << ", "
+                 << "endpos: " << endpos << ", "
+                 << "text size: " << text.size() << "]";
     return false;
   }
 
@@ -569,7 +618,7 @@
     re_anchor = ANCHOR_START;
 
   // Check for the required prefix, if any.
-  int prefixlen = 0;
+  size_t prefixlen = 0;
   if (!prefix_.empty()) {
     if (startpos != 0)
       return false;
@@ -605,7 +654,7 @@
   const int MaxBitStateProg = 500;   // prog_->size() <= Max.
   const int MaxBitStateVector = 256*1024;  // bit vector size <= Max (bits)
   bool can_bit_state = prog_->size() <= MaxBitStateProg;
-  int bit_state_text_max = MaxBitStateVector / prog_->size();
+  size_t bit_state_text_max = MaxBitStateVector / prog_->size();
 
   bool dfa_failed = false;
   switch (re_anchor) {
@@ -614,24 +663,16 @@
       if (!prog_->SearchDFA(subtext, text, anchor, kind,
                             matchp, &dfa_failed, NULL)) {
         if (dfa_failed) {
+          if (options_.log_errors())
+            LOG(ERROR) << "DFA out of memory: size " << prog_->size() << ", "
+                       << "bytemap range " << prog_->bytemap_range() << ", "
+                       << "list count " << prog_->list_count();
           // Fall back to NFA below.
           skipped_test = true;
-          if (FLAGS_trace_re2)
-            LOG(INFO) << "Match " << trunc(pattern_)
-                      << " [" << CEscape(subtext) << "]"
-                      << " DFA failed.";
           break;
         }
-        if (FLAGS_trace_re2)
-          LOG(INFO) << "Match " << trunc(pattern_)
-                    << " [" << CEscape(subtext) << "]"
-                    << " used DFA - no match.";
         return false;
       }
-      if (FLAGS_trace_re2)
-        LOG(INFO) << "Match " << trunc(pattern_)
-                  << " [" << CEscape(subtext) << "]"
-                  << " used DFA - match";
       if (matchp == NULL)  // Matched.  Don't care where
         return true;
       // SearchDFA set match[0].end() but didn't know where the
@@ -643,26 +684,18 @@
       if (!prog->SearchDFA(match, text, Prog::kAnchored,
                            Prog::kLongestMatch, &match, &dfa_failed, NULL)) {
         if (dfa_failed) {
+          if (options_.log_errors())
+            LOG(ERROR) << "DFA out of memory: size " << prog->size() << ", "
+                       << "bytemap range " << prog->bytemap_range() << ", "
+                       << "list count " << prog->list_count();
           // Fall back to NFA below.
           skipped_test = true;
-          if (FLAGS_trace_re2)
-            LOG(INFO) << "Match " << trunc(pattern_)
-                      << " [" << CEscape(subtext) << "]"
-                      << " reverse DFA failed.";
           break;
         }
-        if (FLAGS_trace_re2)
-          LOG(INFO) << "Match " << trunc(pattern_)
-                    << " [" << CEscape(subtext) << "]"
-                    << " DFA inconsistency.";
         if (options_.log_errors())
-          LOG(ERROR) << "DFA inconsistency";
+          LOG(ERROR) << "SearchDFA inconsistency";
         return false;
       }
-      if (FLAGS_trace_re2)
-        LOG(INFO) << "Match " << trunc(pattern_)
-                  << " [" << CEscape(subtext) << "]"
-                  << " used reverse DFA.";
       break;
     }
 
@@ -681,35 +714,24 @@
       // the DFA does.
       if (can_one_pass && text.size() <= 4096 &&
           (ncap > 1 || text.size() <= 8)) {
-        if (FLAGS_trace_re2)
-          LOG(INFO) << "Match " << trunc(pattern_)
-                    << " [" << CEscape(subtext) << "]"
-                    << " skipping DFA for OnePass.";
         skipped_test = true;
         break;
       }
       if (can_bit_state && text.size() <= bit_state_text_max && ncap > 1) {
-        if (FLAGS_trace_re2)
-          LOG(INFO) << "Match " << trunc(pattern_)
-                    << " [" << CEscape(subtext) << "]"
-                    << " skipping DFA for BitState.";
         skipped_test = true;
         break;
       }
       if (!prog_->SearchDFA(subtext, text, anchor, kind,
                             &match, &dfa_failed, NULL)) {
         if (dfa_failed) {
-          if (FLAGS_trace_re2)
-            LOG(INFO) << "Match " << trunc(pattern_)
-                      << " [" << CEscape(subtext) << "]"
-                      << " DFA failed.";
+          if (options_.log_errors())
+            LOG(ERROR) << "DFA out of memory: size " << prog_->size() << ", "
+                       << "bytemap range " << prog_->bytemap_range() << ", "
+                       << "list count " << prog_->list_count();
+          // Fall back to NFA below.
           skipped_test = true;
           break;
         }
-        if (FLAGS_trace_re2)
-          LOG(INFO) << "Match " << trunc(pattern_)
-                    << " [" << CEscape(subtext) << "]"
-                    << " used DFA - no match.";
         return false;
       }
       break;
@@ -735,20 +757,12 @@
     }
 
     if (can_one_pass && anchor != Prog::kUnanchored) {
-      if (FLAGS_trace_re2)
-        LOG(INFO) << "Match " << trunc(pattern_)
-                  << " [" << CEscape(subtext) << "]"
-                  << " using OnePass.";
       if (!prog_->SearchOnePass(subtext1, text, anchor, kind, submatch, ncap)) {
         if (!skipped_test && options_.log_errors())
           LOG(ERROR) << "SearchOnePass inconsistency";
         return false;
       }
     } else if (can_bit_state && subtext1.size() <= bit_state_text_max) {
-      if (FLAGS_trace_re2)
-        LOG(INFO) << "Match " << trunc(pattern_)
-                  << " [" << CEscape(subtext) << "]"
-                  << " using BitState.";
       if (!prog_->SearchBitState(subtext1, text, anchor,
                                  kind, submatch, ncap)) {
         if (!skipped_test && options_.log_errors())
@@ -756,10 +770,6 @@
         return false;
       }
     } else {
-      if (FLAGS_trace_re2)
-        LOG(INFO) << "Match " << trunc(pattern_)
-                  << " [" << CEscape(subtext) << "]"
-                  << " using NFA.";
       if (!prog_->SearchNFA(subtext1, text, anchor, kind, submatch, ncap)) {
         if (!skipped_test && options_.log_errors())
           LOG(ERROR) << "SearchNFA inconsistency";
@@ -770,19 +780,19 @@
 
   // Adjust overall match for required prefix that we stripped off.
   if (prefixlen > 0 && nsubmatch > 0)
-    submatch[0] = StringPiece(submatch[0].begin() - prefixlen,
+    submatch[0] = StringPiece(submatch[0].data() - prefixlen,
                               submatch[0].size() + prefixlen);
 
   // Zero submatches that don't exist in the regexp.
   for (int i = ncap; i < nsubmatch; i++)
-    submatch[i] = NULL;
+    submatch[i] = StringPiece();
   return true;
 }
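A positional-match sketch against the signature above (startpos and endpos are now size_t); the values in the comment are what unanchored matching from offset 3 implies:

    RE2 re("(ba.)");
    re2::StringPiece text("barbazbla");
    re2::StringPiece sub[2];
    if (re.Match(text, 3, text.size(), RE2::UNANCHORED, sub, 2)) {
      // sub[0] and sub[1] both refer to "baz", the first match at or after offset 3.
    }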
 
 // Internal matcher - like Match() but takes Args not StringPieces.
 bool RE2::DoMatch(const StringPiece& text,
-                  Anchor anchor,
-                  int* consumed,
+                  Anchor re_anchor,
+                  size_t* consumed,
                   const Arg* const* args,
                   int n) const {
   if (!ok()) {
@@ -791,6 +801,11 @@
     return false;
   }
 
+  if (NumberOfCapturingGroups() < n) {
+    // RE has fewer capturing groups than number of Arg pointers passed in.
+    return false;
+  }
+
   // Count number of capture groups needed.
   int nvec;
   if (n == 0 && consumed == NULL)
@@ -809,13 +824,13 @@
     heapvec = vec;
   }
 
-  if (!Match(text, 0, text.size(), anchor, vec, nvec)) {
+  if (!Match(text, 0, text.size(), re_anchor, vec, nvec)) {
     delete[] heapvec;
     return false;
   }
 
-  if(consumed != NULL)
-    *consumed = vec[0].end() - text.begin();
+  if (consumed != NULL)
+    *consumed = static_cast<size_t>(vec[0].end() - text.begin());
 
   if (n == 0 || args == NULL) {
     // We are not interested in results
@@ -823,21 +838,11 @@
     return true;
   }
 
-  int ncap = NumberOfCapturingGroups();
-  if (ncap < n) {
-    // RE has fewer capturing groups than number of arg pointers passed in
-    VLOG(1) << "Asked for " << n << " but only have " << ncap;
-    delete[] heapvec;
-    return false;
-  }
-
   // If we got here, we must have matched the whole pattern.
   for (int i = 0; i < n; i++) {
     const StringPiece& s = vec[i+1];
     if (!args[i]->Parse(s.data(), s.size())) {
       // TODO: Should we indicate what the error was?
-      VLOG(1) << "Parse error on #" << i << " " << s << " "
-	      << (void*)s.data() << "/" << s.size();
       delete[] heapvec;
       return false;
     }
@@ -847,54 +852,6 @@
   return true;
 }
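DoMatch backs the variadic FullMatch/PartialMatch wrappers; from the caller's side, the typed-argument parsing it performs looks like this:

    std::string word;
    int number;
    if (RE2::FullMatch("ruby:1234", "(\\w+):(\\d+)", &word, &number)) {
      // word == "ruby", number == 1234; each group is parsed into the
      // corresponding Arg via the parse_* helpers defined further below.
    }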
 
-// Append the "rewrite" string, with backslash subsitutions from "vec",
-// to string "out".
-bool RE2::Rewrite(string *out, const StringPiece &rewrite,
-                 const StringPiece *vec, int veclen) const {
-  for (const char *s = rewrite.data(), *end = s + rewrite.size();
-       s < end; s++) {
-    int c = *s;
-    if (c == '\\') {
-      s++;
-      c = (s < end) ? *s : -1;
-      if (isdigit(c)) {
-        int n = (c - '0');
-        if (n >= veclen) {
-          if (options_.log_errors()) {
-            LOG(ERROR) << "requested group " << n
-                       << " in regexp " << rewrite.data();
-          }
-          return false;
-        }
-        StringPiece snip = vec[n];
-        if (snip.size() > 0)
-          out->append(snip.data(), snip.size());
-      } else if (c == '\\') {
-        out->push_back('\\');
-      } else {
-        if (options_.log_errors())
-          LOG(ERROR) << "invalid rewrite pattern: " << rewrite.data();
-        return false;
-      }
-    } else {
-      out->push_back(c);
-    }
-  }
-  return true;
-}
-
-// Return the number of capturing subpatterns, or -1 if the
-// regexp wasn't valid on construction.
-int RE2::NumberOfCapturingGroups() const {
-  if (suffix_regexp_ == NULL)
-    return -1;
-  ANNOTATE_BENIGN_RACE(&num_captures_, "benign race: in the worst case"
-    " multiple threads end up doing the same work in parallel.");
-  if (num_captures_ == -1)
-    num_captures_ = suffix_regexp_->NumCaptures();
-  return num_captures_;
-}
-
 // Checks that the rewrite string is well-formed with respect to this
 // regular expression.
 bool RE2::CheckRewriteString(const StringPiece& rewrite, string* error) const {
@@ -933,33 +890,96 @@
   return true;
 }
 
+// Returns the maximum submatch needed for the rewrite to be done by Replace().
+// E.g. if rewrite == "foo \\2,\\1", returns 2.
+int RE2::MaxSubmatch(const StringPiece& rewrite) {
+  int max = 0;
+  for (const char *s = rewrite.data(), *end = s + rewrite.size();
+       s < end; s++) {
+    if (*s == '\\') {
+      s++;
+      int c = (s < end) ? *s : -1;
+      if (isdigit(c)) {
+        int n = (c - '0');
+        if (n > max)
+          max = n;
+      }
+    }
+  }
+  return max;
+}
+
+// Append the "rewrite" string, with backslash substitutions from "vec",
+// to string "out".
+bool RE2::Rewrite(string* out,
+                  const StringPiece& rewrite,
+                  const StringPiece* vec,
+                  int veclen) const {
+  for (const char *s = rewrite.data(), *end = s + rewrite.size();
+       s < end; s++) {
+    if (*s != '\\') {
+      out->push_back(*s);
+      continue;
+    }
+    s++;
+    int c = (s < end) ? *s : -1;
+    if (isdigit(c)) {
+      int n = (c - '0');
+      if (n >= veclen) {
+        if (options_.log_errors()) {
+          LOG(ERROR) << "requested group " << n
+                     << " in regexp " << rewrite.data();
+        }
+        return false;
+      }
+      StringPiece snip = vec[n];
+      if (snip.size() > 0)
+        out->append(snip.data(), snip.size());
+    } else if (c == '\\') {
+      out->push_back('\\');
+    } else {
+      if (options_.log_errors())
+        LOG(ERROR) << "invalid rewrite pattern: " << rewrite.data();
+      return false;
+    }
+  }
+  return true;
+}
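From the caller's side, the rewrite syntax handled above (\1 through \9 for capture groups, \\ for a literal backslash) is what Replace and Extract consume; a usage sketch:

    std::string s = "John Smith";
    RE2::Replace(&s, "(\\w+) (\\w+)", "\\2, \\1");
    // s is now "Smith, John": group 2, then group 1, per the rules above.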
+
 /***** Parsers for various types *****/
 
-bool RE2::Arg::parse_null(const char* str, int n, void* dest) {
+bool RE2::Arg::parse_null(const char* str, size_t n, void* dest) {
   // We fail if somebody asked us to store into a non-NULL void* pointer
   return (dest == NULL);
 }
 
-bool RE2::Arg::parse_string(const char* str, int n, void* dest) {
+bool RE2::Arg::parse_string(const char* str, size_t n, void* dest) {