Merge "Update aosp/master compiler-rt for rebase to r230699."
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 472d49c..f348585 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,7 +104,8 @@
   # Get some LLVM variables from LLVMConfig.
   include("${LLVM_CMAKE_PATH}/LLVMConfig.cmake")
 
-  set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib)
+  set(LLVM_LIBRARY_OUTPUT_INTDIR
+    ${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib${LLVM_LIBDIR_SUFFIX})
 
   # Find Python interpreter.
   set(Python_ADDITIONAL_VERSIONS 2.7 2.6 2.5)
@@ -162,31 +163,6 @@
 # Setup custom SDK sysroots.
 set(COMPILER_RT_LINUX_SDK_SYSROOT ${COMPILER_RT_SOURCE_DIR}/SDKs/linux)
 
-set(COMPILER_RT_EXTRA_ANDROID_HEADERS ${COMPILER_RT_SOURCE_DIR}/android/include)
-
-# Detect whether the current target platform is 32-bit or 64-bit, and setup
-# the correct commandline flags needed to attempt to target 32-bit and 64-bit.
-if (NOT CMAKE_SIZEOF_VOID_P EQUAL 4 AND
-    NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
-  message(FATAL_ERROR "Please use architecture with 4 or 8 byte pointers.")
-endif()
-if (NOT MSVC)
-  set(TARGET_64_BIT_CFLAGS "-m64")
-  set(TARGET_32_BIT_CFLAGS "-m32")
-else()
-  set(TARGET_64_BIT_CFLAGS "")
-  set(TARGET_32_BIT_CFLAGS "")
-endif()
-
-function(get_target_flags_for_arch arch out_var)
-  list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX)
-  if(ARCH_INDEX EQUAL -1)
-    message(FATAL_ERROR "Unsupported architecture: ${arch}")
-  else()
-    set(${out_var} ${TARGET_${arch}_CFLAGS} PARENT_SCOPE)
-  endif()
-endfunction()
-
 # We support running instrumented tests when we're not cross compiling
 # and target a UNIX-like system or Windows.
 # We can run tests on Android even when we are cross-compiling.
@@ -201,17 +177,12 @@
 # COMPILER_RT_DEBUG_PYBOOL is used by lit.common.configured.in.
 pythonize_bool(COMPILER_RT_DEBUG)
 
-# We have to support both static and dynamic/shared runtime on Windows.
-# Android only works with dynamic runtime.
-if(WIN32 OR ANDROID)
-option(COMPILER_RT_BUILD_SHARED_ASAN "Build shared version of AddressSanitizer runtime" ON)
-else()
-option(COMPILER_RT_BUILD_SHARED_ASAN "Build shared version of AddressSanitizer runtime" OFF)
-endif()
-
 #================================
 # Setup Compiler Flags
 #================================
+include(CheckIncludeFile)
+check_include_file(unwind.h HAVE_UNWIND_H)
+
 include(config-ix)
 
 if(MSVC)
@@ -240,6 +211,7 @@
 append_list_if(COMPILER_RT_HAS_FNO_STACK_PROTECTOR_FLAG -fno-stack-protector SANITIZER_COMMON_CFLAGS)
 append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SANITIZER_COMMON_CFLAGS)
 append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections SANITIZER_COMMON_CFLAGS)
+append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SANITIZER_COMMON_CFLAGS)
 
 if(MSVC)
   # Replace the /MD[d] flags with /MT.
@@ -259,14 +231,28 @@
   append_list_if(COMPILER_RT_HAS_GS_FLAG /GS- SANITIZER_COMMON_CFLAGS)
 endif()
 
+append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)
+
 # Build with optimization, unless we're in debug mode. If we're using MSVC,
 # always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
 if(NOT COMPILER_RT_DEBUG AND NOT MSVC)
   list(APPEND SANITIZER_COMMON_CFLAGS -O3)
 endif()
 
+# Determine if we should restrict stack frame sizes.
+# Stack frames on PowerPC and in debug builds can be much larger than
+# anticipated.
+# FIXME: Fix all sanitizers and add -Wframe-larger-than to
+# SANITIZER_COMMON_CFLAGS
+if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG AND NOT COMPILER_RT_DEBUG
+   AND NOT ${LLVM_NATIVE_ARCH} STREQUAL "PowerPC")
+  set(SANITIZER_LIMIT_FRAME_SIZE TRUE)
+else()
+  set(SANITIZER_LIMIT_FRAME_SIZE FALSE)
+endif()
+
 # Build sanitizer runtimes with debug info.
-if(COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG)
+if(COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG AND NOT COMPILER_RT_DEBUG)
   list(APPEND SANITIZER_COMMON_CFLAGS -gline-tables-only)
 elseif(COMPILER_RT_HAS_G_FLAG)
   list(APPEND SANITIZER_COMMON_CFLAGS -g)
@@ -285,12 +271,27 @@
 append_list_if(COMPILER_RT_HAS_WD4722_FLAG /wd4722 SANITIZER_COMMON_CFLAGS)
 append_list_if(COMPILER_RT_HAS_WD4800_FLAG /wd4800 SANITIZER_COMMON_CFLAGS)
 if(APPLE)
-  # Obtain the iOS Simulator SDK path from xcodebuild.
-  execute_process(
-    COMMAND xcodebuild -version -sdk iphonesimulator Path
-    OUTPUT_VARIABLE IOSSIM_SDK_DIR
-    OUTPUT_STRIP_TRAILING_WHITESPACE
-  )
+  macro(find_darwin_sdk_dir var sdk_name)
+    # Try the internal SDK first; if it's unavailable, fall back to the public SDK.
+    execute_process(
+      COMMAND xcodebuild -version -sdk ${sdk_name}.internal Path
+      OUTPUT_VARIABLE ${var}
+      OUTPUT_STRIP_TRAILING_WHITESPACE
+      ERROR_FILE /dev/null
+    )
+    if(${var} STREQUAL "")
+      execute_process(
+        COMMAND xcodebuild -version -sdk ${sdk_name} Path
+        OUTPUT_VARIABLE ${var}
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+        ERROR_FILE /dev/null
+      )
+    endif()
+  endmacro()
+
+  find_darwin_sdk_dir(OSX_SDK_DIR macosx)
+  find_darwin_sdk_dir(IOSSIM_SDK_DIR iphonesimulator)
+
   string(REGEX MATCH "-mmacosx-version-min="
          MACOSX_VERSION_MIN_FLAG "${CMAKE_CXX_FLAGS}")
   set(SANITIZER_COMMON_SUPPORTED_DARWIN_OS osx)
@@ -300,10 +301,12 @@
 
   set(SANITIZER_MIN_OSX_VERSION 10.7)
   set(CMAKE_OSX_DEPLOYMENT_TARGET "") # We're setting the flag manually below.
-  set(DARWIN_osx_CFLAGS -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION})
+  set(DARWIN_osx_CFLAGS -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}
+    -isysroot ${OSX_SDK_DIR} -stdlib=libc++)
   set(DARWIN_iossim_CFLAGS
     -mios-simulator-version-min=7.0 -isysroot ${IOSSIM_SDK_DIR})
-  set(DARWIN_osx_LINKFLAGS)
+  set(DARWIN_osx_LINKFLAGS -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}
+    -isysroot ${OSX_SDK_DIR} -stdlib=libc++)
   set(DARWIN_iossim_LINKFLAGS
     -Wl,-ios_simulator_version_min,7.0.0
     -mios-simulator-version-min=7.0
diff --git a/android/README.LLVM b/android/README.LLVM
deleted file mode 100644
index 4a249e5..0000000
--- a/android/README.LLVM
+++ /dev/null
@@ -1,9 +0,0 @@
-LLVM notes
-----------
-
-This directory contains Android header ucontext.h missing from the NDK.
-This version of the header was copied from google-breakpad at r1279.
-
-Local changes:
-  * Re-licensed under the standard dual license of compiler-rt.
-
diff --git a/android/include/sys/ucontext.h b/android/include/sys/ucontext.h
deleted file mode 100644
index 81e65a5..0000000
--- a/android/include/sys/ucontext.h
+++ /dev/null
@@ -1,154 +0,0 @@
-//===-- ucontext.h ----------------------------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.TXT for details.
-//
-// ===----------------------------------------------------------------------===
-
-#ifndef GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_SYS_UCONTEXT_H
-#define GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_SYS_UCONTEXT_H
-
-#include <sys/cdefs.h>
-#include <signal.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif  // __cplusplus
-
-#ifndef __BIONIC_HAVE_UCONTEXT_T
-
-// Ensure that 'stack_t' is defined.
-#include <asm/signal.h>
-
-// This version of the Android C library headers do not provide ucontext_t.
-// Provide custom definitions for Google Breakpad.
-#if defined(__arm__)
-
-// Ensure that 'struct sigcontext' is defined.
-#include <asm/sigcontext.h>
-typedef struct sigcontext mcontext_t;
-
-// The ARM kernel uses a 64-bit signal mask.
-typedef uint32_t  kernel_sigmask_t[2];
-
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  kernel_sigmask_t uc_sigmask;
-  // Other fields are not used by Google Breakpad. Don't define them.
-} ucontext_t;
-
-#elif defined(__i386__)
-
-/* 80-bit floating-point register */
-struct _libc_fpreg {
-  unsigned short significand[4];
-  unsigned short exponent;
-};
-
-/* Simple floating-point state, see FNSTENV instruction */
-struct _libc_fpstate {
-  unsigned long cw;
-  unsigned long sw;
-  unsigned long tag;
-  unsigned long ipoff;
-  unsigned long cssel;
-  unsigned long dataoff;
-  unsigned long datasel;
-  struct _libc_fpreg _st[8];
-  unsigned long status;
-};
-
-typedef uint32_t  greg_t;
-
-typedef struct {
-  uint32_t gregs[19];
-  struct _libc_fpstate* fpregs;
-  uint32_t oldmask;
-  uint32_t cr2;
-} mcontext_t;
-
-enum {
-  REG_GS = 0,
-  REG_FS,
-  REG_ES,
-  REG_DS,
-  REG_EDI,
-  REG_ESI,
-  REG_EBP,
-  REG_ESP,
-  REG_EBX,
-  REG_EDX,
-  REG_ECX,
-  REG_EAX,
-  REG_TRAPNO,
-  REG_ERR,
-  REG_EIP,
-  REG_CS,
-  REG_EFL,
-  REG_UESP,
-  REG_SS,
-};
-
-// The i386 kernel uses a 64-bit signal mask.
-typedef uint32_t kernel_sigmask_t[2];
-
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  kernel_sigmask_t uc_sigmask;
-  struct _libc_fpstate __fpregs_mem;
-} ucontext_t;
-
-#elif defined(__mips__)
-
-typedef struct {
-  uint32_t regmask;
-  uint32_t status;
-  uint64_t pc;
-  uint64_t gregs[32];
-  uint64_t fpregs[32];
-  uint32_t acx;
-  uint32_t fpc_csr;
-  uint32_t fpc_eir;
-  uint32_t used_math;
-  uint32_t dsp;
-  uint64_t mdhi;
-  uint64_t mdlo;
-  uint32_t hi1;
-  uint32_t lo1;
-  uint32_t hi2;
-  uint32_t lo2;
-  uint32_t hi3;
-  uint32_t lo3;
-} mcontext_t;
-
-// The MIPS kernel uses a 128-bit signal mask.
-typedef uint32_t kernel_sigmask_t[4];
-
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  kernel_sigmask_t uc_sigmask;
-  // Other fields are not used by Google Breakpad. Don't define them.
-} ucontext_t;
-
-#else
-#  error "Unsupported Android CPU ABI!"
-#endif
-
-#endif  // __BIONIC_HAVE_UCONTEXT_T
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif  // __cplusplus
-
-#endif  // GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_SYS_UCONTEXT_H
diff --git a/android/include/ucontext.h b/android/include/ucontext.h
deleted file mode 100644
index 178ddf3..0000000
--- a/android/include/ucontext.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//===-- ucontext.h ----------------------------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.TXT for details.
-//
-// ===----------------------------------------------------------------------===
-
-#ifndef GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_UCONTEXT_H
-#define GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_UCONTEXT_H
-
-#include <sys/cdefs.h>
-
-#ifdef __BIONIC_UCONTEXT_H
-#include <ucontext.h>
-#else
-
-#include <sys/ucontext.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif  // __cplusplus
-
-// Provided by src/android/common/breakpad_getcontext.S
-int breakpad_getcontext(ucontext_t* ucp);
-
-#define getcontext(x)   breakpad_getcontext(x)
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif  // __cplusplus
-
-#endif  // __BIONIC_UCONTEXT_H
-
-#endif  // GOOGLE_BREAKPAD_COMMON_ANDROID_INCLUDE_UCONTEXT_H
diff --git a/cmake/Modules/AddCompilerRT.cmake b/cmake/Modules/AddCompilerRT.cmake
index aafccd4..a7782a1 100644
--- a/cmake/Modules/AddCompilerRT.cmake
+++ b/cmake/Modules/AddCompilerRT.cmake
@@ -59,7 +59,8 @@
     # Setup correct output directory in the build tree.
     set_target_properties(${name} PROPERTIES
       ARCHIVE_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}
-      LIBRARY_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR})
+      LIBRARY_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}
+      RUNTIME_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR})
     if ("${LIB_OUTPUT_NAME}" STREQUAL "")
       set_target_properties(${name} PROPERTIES
         OUTPUT_NAME ${name}${COMPILER_RT_OS_SUFFIX})
@@ -70,7 +71,8 @@
     # Add installation command.
     install(TARGETS ${name}
       ARCHIVE DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}
-      LIBRARY DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})
+      LIBRARY DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}
+      RUNTIME DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})
   else()
     message(FATAL_ERROR "Archtecture ${arch} can't be targeted")
   endif()
@@ -129,10 +131,11 @@
   -I${COMPILER_RT_GTEST_PATH}
 )
 
+append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 COMPILER_RT_TEST_CFLAGS)
+
 if(MSVC)
   # clang doesn't support exceptions on Windows yet.
-  list(APPEND COMPILER_RT_TEST_CFLAGS
-       -D_HAS_EXCEPTIONS=0)
+  list(APPEND COMPILER_RT_TEST_CFLAGS -D_HAS_EXCEPTIONS=0)
 
   # We should teach clang to understand "#pragma intrinsic", see PR19898.
   list(APPEND COMPILER_RT_TEST_CFLAGS -Wno-undefined-inline)
@@ -154,12 +157,20 @@
 # using specified link flags. Make executable a part of provided
 # test_suite.
 # add_compiler_rt_test(<test_suite> <test_name>
+#                      SUBDIR <subdirectory for binary>
 #                      OBJECTS <object files>
 #                      DEPS <deps (e.g. runtime libs)>
 #                      LINK_FLAGS <link flags>)
 macro(add_compiler_rt_test test_suite test_name)
-  parse_arguments(TEST "OBJECTS;DEPS;LINK_FLAGS" "" ${ARGN})
-  set(output_bin "${CMAKE_CURRENT_BINARY_DIR}/${test_name}")
+  parse_arguments(TEST "SUBDIR;OBJECTS;DEPS;LINK_FLAGS" "" ${ARGN})
+  if(TEST_SUBDIR)
+    set(output_bin "${CMAKE_CURRENT_BINARY_DIR}/${TEST_SUBDIR}/${test_name}")
+  else()
+    set(output_bin "${CMAKE_CURRENT_BINARY_DIR}/${test_name}")
+  endif()
+  if(MSVC)
+    set(output_bin "${output_bin}.exe")
+  endif()
   # Use host compiler in a standalone build, and just-built Clang otherwise.
   if(NOT COMPILER_RT_STANDALONE_BUILD)
     list(APPEND TEST_DEPS clang)
diff --git a/cmake/Modules/CompilerRTCompile.cmake b/cmake/Modules/CompilerRTCompile.cmake
index af3df8f..de73ccf 100644
--- a/cmake/Modules/CompilerRTCompile.cmake
+++ b/cmake/Modules/CompilerRTCompile.cmake
@@ -9,7 +9,7 @@
   parse_arguments(SOURCE "CFLAGS;DEPS" "" ${ARGN})
   get_filename_component(source_rpath ${source} REALPATH)
   if(NOT COMPILER_RT_STANDALONE_BUILD)
-    list(APPEND SOURCE_DEPS clang)
+    list(APPEND SOURCE_DEPS clang compiler-rt-headers)
   endif()
   if (TARGET CompilerRTUnitTestCheckCxx)
     list(APPEND SOURCE_DEPS CompilerRTUnitTestCheckCxx)
diff --git a/cmake/config-ix.cmake b/cmake/config-ix.cmake
index 90ab7fb..9c4c8de 100644
--- a/cmake/config-ix.cmake
+++ b/cmake/config-ix.cmake
@@ -1,6 +1,8 @@
+include(CMakePushCheckState)
 include(CheckCXXCompilerFlag)
 include(CheckLibraryExists)
 include(CheckSymbolExists)
+include(TestBigEndian)
 
 # CodeGen options.
 check_cxx_compiler_flag(-fPIC                COMPILER_RT_HAS_FPIC_FLAG)
@@ -16,6 +18,8 @@
 check_cxx_compiler_flag("-Werror -fno-function-sections" COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG)
 check_cxx_compiler_flag(-std=c++11           COMPILER_RT_HAS_STD_CXX11_FLAG)
 check_cxx_compiler_flag(-ftls-model=initial-exec COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC)
+check_cxx_compiler_flag(-fno-lto             COMPILER_RT_HAS_FNO_LTO_FLAG)
+check_cxx_compiler_flag(-msse3               COMPILER_RT_HAS_MSSE3_FLAG)
 
 check_cxx_compiler_flag(/GR COMPILER_RT_HAS_GR_FLAG)
 check_cxx_compiler_flag(/GS COMPILER_RT_HAS_GS_FLAG)
@@ -26,7 +30,7 @@
 check_cxx_compiler_flag(-gline-tables-only COMPILER_RT_HAS_GLINE_TABLES_ONLY_FLAG)
 check_cxx_compiler_flag(-g COMPILER_RT_HAS_G_FLAG)
 check_cxx_compiler_flag(/Zi COMPILER_RT_HAS_Zi_FLAG)
- 
+
 # Warnings.
 check_cxx_compiler_flag(-Wall COMPILER_RT_HAS_WALL_FLAG)
 check_cxx_compiler_flag(-Werror COMPILER_RT_HAS_WERROR_FLAG)
@@ -67,19 +71,39 @@
 set(SIMPLE_SOURCE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple.cc)
 file(WRITE ${SIMPLE_SOURCE} "#include <stdlib.h>\n#include <limits>\nint main() {}\n")
 
-# test_target_arch(<arch> <target flags...>)
-# Sets the target flags for a given architecture and determines if this
-# architecture is supported by trying to build a simple file.
-macro(test_target_arch arch)
+function(check_compile_definition def argstring out_var)
+  if("${def}" STREQUAL "")
+    set(${out_var} TRUE PARENT_SCOPE)
+    return()
+  endif()
+  cmake_push_check_state()
+  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${argstring}")
+  check_symbol_exists(${def} "" ${out_var})
+  cmake_pop_check_state()
+endfunction()
+
+# test_target_arch(<arch> <def> <target flags...>)
+# Checks if architecture is supported: runs host compiler with provided
+# flags to verify that:
+#   1) <def> is defined (if non-empty)
+#   2) simple file can be successfully built.
+# If successful, saves target flags for this architecture.
+macro(test_target_arch arch def)
   set(TARGET_${arch}_CFLAGS ${ARGN})
-  set(argstring "${CMAKE_EXE_LINKER_FLAGS}")
+  set(argstring "")
   foreach(arg ${ARGN})
     set(argstring "${argstring} ${arg}")
   endforeach()
-  try_compile(CAN_TARGET_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE}
-              COMPILE_DEFINITIONS "${TARGET_${arch}_CFLAGS}"
-              OUTPUT_VARIABLE TARGET_${arch}_OUTPUT
-              CMAKE_FLAGS "-DCMAKE_EXE_LINKER_FLAGS:STRING=${argstring}")
+  check_compile_definition("${def}" "${argstring}" HAS_${arch}_DEF)
+  if(NOT HAS_${arch}_DEF)
+    set(CAN_TARGET_${arch} FALSE)
+  else()
+    set(argstring "${CMAKE_EXE_LINKER_FLAGS} ${argstring}")
+    try_compile(CAN_TARGET_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE}
+                COMPILE_DEFINITIONS "${TARGET_${arch}_CFLAGS}"
+                OUTPUT_VARIABLE TARGET_${arch}_OUTPUT
+                CMAKE_FLAGS "-DCMAKE_EXE_LINKER_FLAGS:STRING=${argstring}")
+  endif()
   if(${CAN_TARGET_${arch}})
     list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})
   elseif("${COMPILER_RT_TEST_TARGET_ARCH}" MATCHES "${arch}")
@@ -120,6 +144,13 @@
   endif()
 endmacro()
 
+# Detect whether the current target platform is 32-bit or 64-bit, and setup
+# the correct commandline flags needed to attempt to target 32-bit and 64-bit.
+if (NOT CMAKE_SIZEOF_VOID_P EQUAL 4 AND
+    NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
+  message(FATAL_ERROR "Please use architecture with 4 or 8 byte pointers.")
+endif()
+
 # Generate the COMPILER_RT_SUPPORTED_ARCH list.
 if(ANDROID)
   # Can't rely on LLVM_NATIVE_ARCH in cross-compilation.
@@ -128,28 +159,37 @@
   set(COMPILER_RT_OS_SUFFIX "-android")
 else()
   if("${LLVM_NATIVE_ARCH}" STREQUAL "X86")
-    if (NOT MSVC)
-      test_target_arch(x86_64 ${TARGET_64_BIT_CFLAGS})
+    if(NOT MSVC)
+      test_target_arch(x86_64 "" "-m64")
+      test_target_arch(i686 __i686__ "-m32")
+      if(NOT CAN_TARGET_i686)
+        test_target_arch(i386 __i386__ "-m32")
+      endif()
+    else()
+      test_target_arch(i386 "" "")
     endif()
-    test_target_arch(i386 ${TARGET_32_BIT_CFLAGS})
   elseif("${LLVM_NATIVE_ARCH}" STREQUAL "PowerPC")
-    test_target_arch(powerpc64 ${TARGET_64_BIT_CFLAGS})
-    test_target_arch(powerpc64le ${TARGET_64_BIT_CFLAGS})
+    TEST_BIG_ENDIAN(HOST_IS_BIG_ENDIAN)
+    if(HOST_IS_BIG_ENDIAN)
+      test_target_arch(powerpc64 "" "-m64")
+    else()
+      test_target_arch(powerpc64le "" "-m64")
+    endif()
   elseif("${LLVM_NATIVE_ARCH}" STREQUAL "Mips")
     if("${COMPILER_RT_TEST_TARGET_ARCH}" MATCHES "mipsel|mips64el")
       # regex for mipsel, mips64el
-      test_target_arch(mipsel ${TARGET_32_BIT_CFLAGS})
-      test_target_arch(mips64el ${TARGET_64_BIT_CFLAGS})
+      test_target_arch(mipsel "" "-m32")
+      test_target_arch(mips64el "" "-m64")
     else()
-      test_target_arch(mips ${TARGET_32_BIT_CFLAGS})
-      test_target_arch(mips64 ${TARGET_64_BIT_CFLAGS})
+      test_target_arch(mips "" "-m32")
+      test_target_arch(mips64 "" "-m64")
     endif()
   elseif("${COMPILER_RT_TEST_TARGET_ARCH}" MATCHES "arm")
-    test_target_arch(arm "-march=armv7-a")
+    test_target_arch(arm "" "-march=armv7-a")
   elseif("${COMPILER_RT_TEST_TARGET_ARCH}" MATCHES "aarch32")
-    test_target_arch(aarch32 "-march=armv8-a")
+    test_target_arch(aarch32 "" "-march=armv8-a")
   elseif("${COMPILER_RT_TEST_TARGET_ARCH}" MATCHES "aarch64")
-    test_target_arch(aarch64 "-march=aarch64")
+    test_target_arch(aarch64 "" "-march=armv8-a")
   endif()
   set(COMPILER_RT_OS_SUFFIX "")
 endif()
@@ -168,13 +208,22 @@
   set(${out_var} ${archs} PARENT_SCOPE)
 endfunction()
 
-# Arhcitectures supported by compiler-rt libraries.
+function(get_target_flags_for_arch arch out_var)
+  list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX)
+  if(ARCH_INDEX EQUAL -1)
+    message(FATAL_ERROR "Unsupported architecture: ${arch}")
+  else()
+    set(${out_var} ${TARGET_${arch}_CFLAGS} PARENT_SCOPE)
+  endif()
+endfunction()
+
+# Architectures supported by compiler-rt libraries.
 filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH
   x86_64 i386 i686 powerpc64 powerpc64le arm aarch64 mips mips64 mipsel mips64el)
 filter_available_targets(ASAN_SUPPORTED_ARCH
   x86_64 i386 i686 powerpc64 powerpc64le arm mips mipsel mips64 mips64el)
-filter_available_targets(DFSAN_SUPPORTED_ARCH x86_64)
-filter_available_targets(LSAN_SUPPORTED_ARCH x86_64)
+filter_available_targets(DFSAN_SUPPORTED_ARCH x86_64 mips64 mips64el)
+filter_available_targets(LSAN_SUPPORTED_ARCH x86_64 mips64 mips64el)
 # LSan common files should be available on all architectures supported
 # by other sanitizers (even if they build into dummy object files).
 filter_available_targets(LSAN_COMMON_SUPPORTED_ARCH
@@ -182,8 +231,8 @@
 filter_available_targets(MSAN_SUPPORTED_ARCH x86_64 mips64 mips64el)
 filter_available_targets(PROFILE_SUPPORTED_ARCH x86_64 i386 i686 arm mips mips64
   mipsel mips64el aarch64 powerpc64 powerpc64le)
-filter_available_targets(TSAN_SUPPORTED_ARCH x86_64)
-filter_available_targets(UBSAN_SUPPORTED_ARCH x86_64 i386 i686 arm aarch64 mips mipsel)
+filter_available_targets(TSAN_SUPPORTED_ARCH x86_64 mips64 mips64el)
+filter_available_targets(UBSAN_SUPPORTED_ARCH x86_64 i386 i686 arm aarch64 mips mipsel mips64 mips64el)
 
 if(ANDROID)
   set(OS_NAME "Android")
@@ -205,6 +254,12 @@
   set(COMPILER_RT_HAS_ASAN FALSE)
 endif()
 
+if (OS_NAME MATCHES "Linux|FreeBSD|Windows")
+  set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME TRUE)
+else()
+  set(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME FALSE)
+endif()
+
 # TODO: Add builtins support.
 
 if (COMPILER_RT_HAS_SANITIZER_COMMON AND DFSAN_SUPPORTED_ARCH AND
diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt
index 7f8664e..ad1437e 100644
--- a/include/CMakeLists.txt
+++ b/include/CMakeLists.txt
@@ -2,6 +2,7 @@
   sanitizer/allocator_interface.h
   sanitizer/asan_interface.h
   sanitizer/common_interface_defs.h
+  sanitizer/coverage_interface.h
   sanitizer/dfsan_interface.h
   sanitizer/linux_syscall_hooks.h
   sanitizer/lsan_interface.h
diff --git a/include/sanitizer/asan_interface.h b/include/sanitizer/asan_interface.h
index 4353914..7763389 100644
--- a/include/sanitizer/asan_interface.h
+++ b/include/sanitizer/asan_interface.h
@@ -114,8 +114,7 @@
   // Returns the old value.
   int __asan_set_error_exit_code(int exit_code);
 
-  // Sets the callback to be called right before death on error.
-  // Passing 0 will unset the callback.
+  // Deprecated. Call __sanitizer_set_death_callback instead.
   void __asan_set_death_callback(void (*callback)(void));
 
   void __asan_set_error_report_callback(void (*callback)(const char*));
diff --git a/include/sanitizer/common_interface_defs.h b/include/sanitizer/common_interface_defs.h
index 9cb5ad8..ef645e5 100644
--- a/include/sanitizer/common_interface_defs.h
+++ b/include/sanitizer/common_interface_defs.h
@@ -62,18 +62,6 @@
   void __sanitizer_unaligned_store32(void *p, uint32_t x);
   void __sanitizer_unaligned_store64(void *p, uint64_t x);
 
-  // Initialize coverage.
-  void __sanitizer_cov_init();
-  // Record and dump coverage info.
-  void __sanitizer_cov_dump();
-  // Open <name>.sancov.packed in the coverage directory and return the file
-  // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
-  // This is intended for use by sandboxing code.
-  intptr_t __sanitizer_maybe_open_cov_file(const char *name);
-  // Get the number of total unique covered entities (blocks, edges, calls).
-  // This can be useful for coverage-directed in-process fuzzers.
-  uintptr_t __sanitizer_get_total_unique_coverage();
-
   // Annotate the current state of a contiguous container, such as
   // std::vector, std::string or similar.
   // A contiguous container is a container that keeps all of its elements
@@ -120,6 +108,9 @@
   // Print the stack trace leading to this call. Useful for debugging user code.
   void __sanitizer_print_stack_trace();
 
+  // Sets the callback to be called right before death on error.
+  // Passing 0 will unset the callback.
+  void __sanitizer_set_death_callback(void (*callback)(void));
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/include/sanitizer/coverage_interface.h b/include/sanitizer/coverage_interface.h
new file mode 100644
index 0000000..88a7e48
--- /dev/null
+++ b/include/sanitizer/coverage_interface.h
@@ -0,0 +1,46 @@
+//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface for sanitizer coverage.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COVERAG_INTERFACE_H
+#define SANITIZER_COVERAG_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  // Initialize coverage.
+  void __sanitizer_cov_init();
+  // Record and dump coverage info.
+  void __sanitizer_cov_dump();
+  // Open <name>.sancov.packed in the coverage directory and return the file
+  // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
+  // This is intended for use by sandboxing code.
+  intptr_t __sanitizer_maybe_open_cov_file(const char *name);
+  // Get the number of total unique covered entities (blocks, edges, calls).
+  // This can be useful for coverage-directed in-process fuzzers.
+  uintptr_t __sanitizer_get_total_unique_coverage();
+
+  // Reset the basic-block (edge) coverage to the initial state.
+  // Useful for in-process fuzzing to start collecting coverage from scratch.
+  // Experimental; will likely not work for multi-threaded processes.
+  void __sanitizer_reset_coverage();
+  // Set *data to the array of covered PCs and return the size of that array.
+  // Some of the entries in *data will be zero.
+  uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // SANITIZER_COVERAG_INTERFACE_H
diff --git a/include/sanitizer/msan_interface.h b/include/sanitizer/msan_interface.h
index 5be5860..f54bcaa 100644
--- a/include/sanitizer/msan_interface.h
+++ b/include/sanitizer/msan_interface.h
@@ -25,6 +25,11 @@
   /* Get raw origin for an address. */
   uint32_t __msan_get_origin(const volatile void *a);
 
+  /* Test that this_id is a descendant of prev_id (or they are simply equal).
+   * "descendant" here means they are part of the same chain, created with
+   * __msan_chain_origin. */
+  int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
+
   /* Returns non-zero if tracking origins. */
   int __msan_get_track_origins();
 
@@ -38,7 +43,9 @@
      contents). */
   void __msan_unpoison_string(const volatile char *a);
 
-  /* Make memory region fully uninitialized (without changing its contents). */
+  /* Make memory region fully uninitialized (without changing its contents).
+     This is a legacy interface that does not update origin information. Use
+     __msan_allocated_memory() instead. */
   void __msan_poison(const volatile void *a, size_t size);
 
   /* Make memory region partially uninitialized (without changing its contents).
diff --git a/lib/Makefile.mk b/lib/Makefile.mk
index ed9690d..7eb6489 100644
--- a/lib/Makefile.mk
+++ b/lib/Makefile.mk
@@ -12,11 +12,8 @@
 # Add submodules.
 SubDirs += asan
 SubDirs += builtins
-SubDirs += dfsan
 SubDirs += interception
 SubDirs += lsan
-SubDirs += msan
 SubDirs += profile
 SubDirs += sanitizer_common
-SubDirs += tsan
 SubDirs += ubsan
diff --git a/lib/asan/Android.mk b/lib/asan/Android.mk
index 4a611d3..4314c02 100644
--- a/lib/asan/Android.mk
+++ b/lib/asan/Android.mk
@@ -23,8 +23,9 @@
 
 asan_rtl_files := \
   asan_activation.cc \
-  asan_allocator2.cc \
+  asan_allocator.cc \
   asan_fake_stack.cc \
+  asan_flags.cc \
   asan_globals.cc \
   asan_interceptors.cc \
   asan_linux.cc \
@@ -38,6 +39,7 @@
   asan_rtl.cc \
   asan_stack.cc \
   asan_stats.cc \
+  asan_suppressions.cc \
   asan_thread.cc \
   asan_win.cc \
   ../interception/interception_linux.cc \
@@ -51,6 +53,7 @@
   ../sanitizer_common/sanitizer_deadlock_detector1.cc \
   ../sanitizer_common/sanitizer_deadlock_detector2.cc \
   ../sanitizer_common/sanitizer_flags.cc \
+  ../sanitizer_common/sanitizer_flag_parser.cc \
   ../sanitizer_common/sanitizer_libc.cc \
   ../sanitizer_common/sanitizer_libignore.cc \
   ../sanitizer_common/sanitizer_linux.cc \
@@ -182,6 +185,7 @@
     external/compiler-rt/lib/asan/tests \
     external/compiler-rt/lib/sanitizer_common/tests
 LOCAL_CFLAGS += \
+    -Wno-non-virtual-dtor \
     -Wno-unused-parameter \
     -Wno-sign-compare \
     -DASAN_UAR=0 \
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index 6251f06..90cb6f8 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -1,10 +1,11 @@
 # Build for the AddressSanitizer runtime support library.
 
 set(ASAN_SOURCES
-  asan_allocator2.cc
+  asan_allocator.cc
   asan_activation.cc
   asan_debugging.cc
   asan_fake_stack.cc
+  asan_flags.cc
   asan_globals.cc
   asan_interceptors.cc
   asan_linux.cc
@@ -18,6 +19,7 @@
   asan_rtl.cc
   asan_stack.cc
   asan_stats.cc
+  asan_suppressions.cc
   asan_thread.cc
   asan_win.cc)
 
@@ -29,10 +31,6 @@
 
 include_directories(..)
 
-if(ANDROID)
-  include_directories(${COMPILER_RT_EXTRA_ANDROID_HEADERS})
-endif()
-
 set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
 append_no_rtti_flag(ASAN_CFLAGS)
 
@@ -67,8 +65,8 @@
     add_compiler_rt_darwin_object_library(RTAsan ${os}
       ARCH ${ASAN_SUPPORTED_ARCH}
       SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
-      CFLAGS ${ASAN_CFLAGS}
-      DEFS ${ASAN_COMMON_DEFINITIONS})
+      CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+      DEFS ${ASAN_DYNAMIC_DEFINITIONS})
   endforeach()
 else()
   foreach(arch ${ASAN_SUPPORTED_ARCH})
@@ -81,12 +79,10 @@
     add_compiler_rt_object_library(RTAsan_preinit ${arch}
       SOURCES ${ASAN_PREINIT_SOURCES} CFLAGS ${ASAN_CFLAGS}
       DEFS ${ASAN_COMMON_DEFINITIONS})
-    if (COMPILER_RT_BUILD_SHARED_ASAN)
-      add_compiler_rt_object_library(RTAsan_dynamic ${arch}
-        SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
-        CFLAGS ${ASAN_DYNAMIC_CFLAGS}
-        DEFS ${ASAN_DYNAMIC_DEFINITIONS})
-    endif()
+    add_compiler_rt_object_library(RTAsan_dynamic ${arch}
+      SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
+      CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+      DEFS ${ASAN_DYNAMIC_DEFINITIONS})
   endforeach()
 endif()
 
@@ -100,8 +96,8 @@
               $<TARGET_OBJECTS:RTInterception.${os}>
               $<TARGET_OBJECTS:RTSanitizerCommon.${os}>
               $<TARGET_OBJECTS:RTLSanCommon.${os}>
-      CFLAGS ${ASAN_CFLAGS}
-      DEFS ${ASAN_COMMON_DEFINITIONS})
+      CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+      DEFS ${ASAN_DYNAMIC_DEFINITIONS})
     add_dependencies(asan clang_rt.asan_${os}_dynamic)
   endforeach()
 else()
@@ -131,30 +127,27 @@
       DEFS ${ASAN_COMMON_DEFINITIONS})
     add_dependencies(asan clang_rt.asan_cxx-${arch})
 
-    if (COMPILER_RT_BUILD_SHARED_ASAN)
-      add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
-        SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
-        CFLAGS ${ASAN_CFLAGS}
-        DEFS ${ASAN_COMMON_DEFINITIONS})
-      add_dependencies(asan clang_rt.asan-preinit-${arch})
+    add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
+      SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
+      CFLAGS ${ASAN_CFLAGS}
+      DEFS ${ASAN_COMMON_DEFINITIONS})
+    add_dependencies(asan clang_rt.asan-preinit-${arch})
 
-      if (WIN32)
-         set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
-      else()
-         set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
-      endif()
-
-      add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
-        OUTPUT_NAME ${SHARED_ASAN_NAME}
-        SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
-                ${ASAN_COMMON_RUNTIME_OBJECTS}
-        CFLAGS ${ASAN_DYNAMIC_CFLAGS}
-        DEFS ${ASAN_DYNAMIC_DEFINITIONS})
-      target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
-      add_dependencies(asan clang_rt.asan-dynamic-${arch})
+    if (WIN32)
+      set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
+    else()
+      set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
     endif()
+    add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
+      OUTPUT_NAME ${SHARED_ASAN_NAME}
+      SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
+              ${ASAN_COMMON_RUNTIME_OBJECTS}
+      CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+      DEFS ${ASAN_DYNAMIC_DEFINITIONS})
+    target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
+    add_dependencies(asan clang_rt.asan-dynamic-${arch})
 
-    if (UNIX AND NOT ${arch} STREQUAL "i386" AND NOT ${arch} STREQUAL "i686")
+    if (UNIX AND NOT ${arch} MATCHES "i386|i686")
       add_sanitizer_rt_symbols(clang_rt.asan_cxx-${arch})
       add_dependencies(asan clang_rt.asan_cxx-${arch}-symbols)
       add_sanitizer_rt_symbols(clang_rt.asan-${arch} asan.syms.extra)
diff --git a/lib/asan/README.txt b/lib/asan/README.txt
index b9c43ac..8cc9bb1 100644
--- a/lib/asan/README.txt
+++ b/lib/asan/README.txt
@@ -1,7 +1,6 @@
 AddressSanitizer RT
 ================================
-This directory contains sources of the AddressSanitizer (asan) runtime library.
-We are in the process of integrating AddressSanitizer with LLVM, stay tuned.
+This directory contains sources of the AddressSanitizer (ASan) runtime library.
 
 Directory structure:
 README.txt       : This file.
@@ -13,14 +12,13 @@
 
 Also ASan runtime needs the following libraries:
 lib/interception/      : Machinery used to intercept function calls.
-lib/sanitizer_common/  : Code shared between ASan and TSan.
+lib/sanitizer_common/  : Code shared between various sanitizers.
 
-Currently ASan runtime can be built by both make and cmake build systems.
-(see compiler-rt/make and files Makefile.mk for make-based build and
-files CMakeLists.txt for cmake-based build).
+ASan runtime currently also embeds part of LeakSanitizer runtime for
+leak detection (lib/lsan/lsan_common.{cc,h}).
 
-ASan unit and output tests work only with cmake. You may run this
-command from the root of your cmake build tree:
+ASan runtime can only be built by CMake. You can run ASan tests
+from the root of your CMake build tree:
 
 make check-asan
 
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index 23273be..3bc0198 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -16,32 +16,106 @@
 #include "asan_allocator.h"
 #include "asan_flags.h"
 #include "asan_internal.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
 #include "sanitizer_common/sanitizer_flags.h"
 
 namespace __asan {
 
 static struct AsanDeactivatedFlags {
-  int quarantine_size;
-  int max_redzone;
+  AllocatorOptions allocator_options;
   int malloc_context_size;
   bool poison_heap;
+  bool coverage;
+  const char *coverage_dir;
+
+  void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+  RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+  RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
+
+    RegisterIncludeFlag(parser, cf);
+  }
+
+  void OverrideFromActivationFlags() {
+    Flags f;
+    CommonFlags cf;
+    FlagParser parser;
+    RegisterActivationFlags(&parser, &f, &cf);
+
+    // Copy the current activation flags.
+    allocator_options.CopyTo(&f, &cf);
+    cf.malloc_context_size = malloc_context_size;
+    f.poison_heap = poison_heap;
+    cf.coverage = coverage;
+    cf.coverage_dir = coverage_dir;
+    cf.verbosity = Verbosity();
+    cf.help = false; // this is activation-specific help
+
+    // Check if activation flags need to be overridden.
+    if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+      parser.ParseString(env);
+    }
+
+    // Override from getprop asan.options.
+    char buf[100];
+    GetExtraActivationFlags(buf, sizeof(buf));
+    parser.ParseString(buf);
+
+    SetVerbosity(cf.verbosity);
+
+    if (Verbosity()) ReportUnrecognizedFlags();
+
+    if (cf.help) parser.PrintFlagDescriptions();
+
+    allocator_options.SetFrom(&f, &cf);
+    malloc_context_size = cf.malloc_context_size;
+    poison_heap = f.poison_heap;
+    coverage = cf.coverage;
+    coverage_dir = cf.coverage_dir;
+  }
+
+  void Print() {
+    Report(
+        "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
+        "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+        "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+        allocator_options.quarantine_size_mb, allocator_options.max_redzone,
+        poison_heap, malloc_context_size,
+        allocator_options.alloc_dealloc_mismatch,
+        allocator_options.may_return_null, coverage, coverage_dir);
+  }
 } asan_deactivated_flags;
 
 static bool asan_is_deactivated;
 
-void AsanStartDeactivated() {
+void AsanDeactivate() {
+  CHECK(!asan_is_deactivated);
   VReport(1, "Deactivating ASan\n");
-  // Save flag values.
-  asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
-  asan_deactivated_flags.max_redzone = flags()->max_redzone;
-  asan_deactivated_flags.poison_heap = flags()->poison_heap;
-  asan_deactivated_flags.malloc_context_size =
-      common_flags()->malloc_context_size;
 
-  flags()->quarantine_size = 0;
-  flags()->max_redzone = 16;
-  flags()->poison_heap = false;
-  common_flags()->malloc_context_size = 0;
+  // Stash runtime state.
+  GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+  asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+  asan_deactivated_flags.poison_heap = CanPoisonMemory();
+  asan_deactivated_flags.coverage = common_flags()->coverage;
+  asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+  // Deactivate the runtime.
+  SetCanPoisonMemory(false);
+  SetMallocContextSize(1);
+  ReInitializeCoverage(false, nullptr);
+
+  AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+  disabled.quarantine_size_mb = 0;
+  disabled.min_redzone = 16;  // Redzone must be at least 16 bytes long.
+  disabled.max_redzone = 16;
+  disabled.alloc_dealloc_mismatch = false;
+  disabled.may_return_null = true;
+  ReInitializeAllocator(disabled);
 
   asan_is_deactivated = true;
 }
@@ -50,25 +124,19 @@
   if (!asan_is_deactivated) return;
   VReport(1, "Activating ASan\n");
 
-  // Restore flag values.
-  // FIXME: this is not atomic, and there may be other threads alive.
-  flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
-  flags()->max_redzone = asan_deactivated_flags.max_redzone;
-  flags()->poison_heap = asan_deactivated_flags.poison_heap;
-  common_flags()->malloc_context_size =
-      asan_deactivated_flags.malloc_context_size;
+  asan_deactivated_flags.OverrideFromActivationFlags();
 
-  ParseExtraActivationFlags();
-
-  ReInitializeAllocator();
+  SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+  SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+  ReInitializeCoverage(asan_deactivated_flags.coverage,
+                       asan_deactivated_flags.coverage_dir);
+  ReInitializeAllocator(asan_deactivated_flags.allocator_options);
 
   asan_is_deactivated = false;
-  VReport(
-      1,
-      "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
-      "%d\n",
-      flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
-      common_flags()->malloc_context_size);
+  if (Verbosity()) {
+    Report("Activated with flags:\n");
+    asan_deactivated_flags.Print();
+  }
 }
 
 }  // namespace __asan
diff --git a/lib/asan/asan_activation.h b/lib/asan/asan_activation.h
index dafb840..d5e1ce4 100644
--- a/lib/asan/asan_activation.h
+++ b/lib/asan/asan_activation.h
@@ -16,7 +16,7 @@
 #define ASAN_ACTIVATION_H
 
 namespace __asan {
-void AsanStartDeactivated();
+void AsanDeactivate();
 void AsanActivate();
 }  // namespace __asan
 
diff --git a/lib/asan/asan_activation_flags.inc b/lib/asan/asan_activation_flags.inc
new file mode 100644
index 0000000..d4c089e
--- /dev/null
+++ b/lib/asan/asan_activation_flags.inc
@@ -0,0 +1,35 @@
+//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A subset of ASan (and common) runtime flags supported at activation time.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ACTIVATION_FLAG
+# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+#ifndef COMMON_ACTIVATION_FLAG
+# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+// ASAN_ACTIVATION_FLAG(Type, Name)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_ACTIVATION_FLAG(int, redzone)
+ASAN_ACTIVATION_FLAG(int, max_redzone)
+ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
+ASAN_ACTIVATION_FLAG(bool, poison_heap)
+
+COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
+COMMON_ACTIVATION_FLAG(int, malloc_context_size)
+COMMON_ACTIVATION_FLAG(bool, coverage)
+COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
+COMMON_ACTIVATION_FLAG(int, verbosity)
+COMMON_ACTIVATION_FLAG(bool, help)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
new file mode 100644
index 0000000..fd63ac6
--- /dev/null
+++ b/lib/asan/asan_allocator.cc
@@ -0,0 +1,909 @@
+//===-- asan_allocator.cc -------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, second version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: for larger allocation larger redzones are used.
+static u32 RZLog2Size(u32 rz_log) {
+  CHECK_LT(rz_log, 8);
+  return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+  CHECK_GE(rz_size, 16);
+  CHECK_LE(rz_size, 2048);
+  CHECK(IsPowerOfTwo(rz_size));
+  u32 res = Log2(rz_size) - 4;
+  CHECK_EQ(rz_size, RZLog2Size(res));
+  return res;
+}
+
+static AsanAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+//   L -- left redzone words (0 or more bytes)
+//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+//   U -- user memory.
+//   R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If the left redzone is greater than the ChunkHeader size we store a magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B L L L L L L L L L  H H U U U U U U
+//   |                    ^
+//   ---------------------|
+//   M -- magic value kAllocBegMagic
+//   B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kAllocBegMagic = 0xCC6E96B9;
+
+struct ChunkHeader {
+  // 1-st 8 bytes.
+  u32 chunk_state       : 8;  // Must be first.
+  u32 alloc_tid         : 24;
+
+  u32 free_tid          : 24;
+  u32 from_memalign     : 1;
+  u32 alloc_type        : 2;
+  u32 rz_log            : 3;
+  u32 lsan_tag          : 2;
+  // 2-nd 8 bytes
+  // This field is used for small sizes. For large sizes it is equal to
+  // SizeClassMap::kMaxSize and the actual size is stored in the
+  // SecondaryAllocator's metadata.
+  u32 user_requested_size;
+  u32 alloc_context_id;
+};
+
+struct ChunkBase : ChunkHeader {
+  // Header2, intersects with user memory.
+  u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
+  CHUNK_ALLOCATED  = 2,
+  CHUNK_QUARANTINE = 3
+};
+
+struct AsanChunk: ChunkBase {
+  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+  uptr UsedSize(bool locked_version = false) {
+    if (user_requested_size != SizeClassMap::kMaxSize)
+      return user_requested_size;
+    return *reinterpret_cast<uptr *>(
+               get_allocator().GetMetaData(AllocBeg(locked_version)));
+  }
+  void *AllocBeg(bool locked_version = false) {
+    if (from_memalign) {
+      if (locked_version)
+        return get_allocator().GetBlockBeginFastLocked(
+            reinterpret_cast<void *>(this));
+      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+    }
+    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+  }
+  bool AddrIsInside(uptr addr, bool locked_version = false) {
+    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+  }
+};
+
+struct QuarantineCallback {
+  explicit QuarantineCallback(AllocatorCache *cache)
+      : cache_(cache) {
+  }
+
+  void Recycle(AsanChunk *m) {
+    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+    CHECK_NE(m->alloc_tid, kInvalidTid);
+    CHECK_NE(m->free_tid, kInvalidTid);
+    PoisonShadow(m->Beg(),
+                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+                 kAsanHeapLeftRedzoneMagic);
+    void *p = reinterpret_cast<void *>(m->AllocBeg());
+    if (p != m) {
+      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+      // Clear the magic value, as allocator internals may overwrite the
+      // contents of deallocated chunk, confusing GetAsanChunk lookup.
+      alloc_magic[0] = 0;
+      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+    }
+
+    // Statistics.
+    AsanStats &thread_stats = GetCurrentThreadStats();
+    thread_stats.real_frees++;
+    thread_stats.really_freed += m->UsedSize();
+
+    get_allocator().Deallocate(cache_, p);
+  }
+
+  void *Allocate(uptr size) {
+    return get_allocator().Allocate(cache_, size, 1, false);
+  }
+
+  void Deallocate(void *p) {
+    get_allocator().Deallocate(cache_, p);
+  }
+
+  AllocatorCache *cache_;
+};
+
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+  // Statistics.
+  AsanStats &thread_stats = GetCurrentThreadStats();
+  thread_stats.mmaps++;
+  thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+  PoisonShadow(p, size, 0);
+  // We are about to unmap a chunk of user memory.
+  // Mark the corresponding shadow memory as not needed.
+  FlushUnneededASanShadowMemory(p, size);
+  // Statistics.
+  AsanStats &thread_stats = GetCurrentThreadStats();
+  thread_stats.munmaps++;
+  thread_stats.munmaped += size;
+}
+
+// We can not use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  return &ms->allocator_cache;
+}
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+  quarantine_size_mb = f->quarantine_size_mb;
+  min_redzone = f->redzone;
+  max_redzone = f->max_redzone;
+  may_return_null = cf->allocator_may_return_null;
+  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+  f->quarantine_size_mb = quarantine_size_mb;
+  f->redzone = min_redzone;
+  f->max_redzone = max_redzone;
+  cf->allocator_may_return_null = may_return_null;
+  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+}
+
+struct Allocator {
+  static const uptr kMaxAllowedMallocSize =
+      FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
+  static const uptr kMaxThreadLocalQuarantine =
+      FIRST_32_SECOND_64(1 << 18, 1 << 20);
+
+  AsanAllocator allocator;
+  AsanQuarantine quarantine;
+  StaticSpinMutex fallback_mutex;
+  AllocatorCache fallback_allocator_cache;
+  QuarantineCache fallback_quarantine_cache;
+
+  // ------------------- Options --------------------------
+  atomic_uint16_t min_redzone;
+  atomic_uint16_t max_redzone;
+  atomic_uint8_t alloc_dealloc_mismatch;
+
+  // ------------------- Initialization ------------------------
+  explicit Allocator(LinkerInitialized)
+      : quarantine(LINKER_INITIALIZED),
+        fallback_quarantine_cache(LINKER_INITIALIZED) {}
+
+  void CheckOptions(const AllocatorOptions &options) const {
+    CHECK_GE(options.min_redzone, 16);
+    CHECK_GE(options.max_redzone, options.min_redzone);
+    CHECK_LE(options.max_redzone, 2048);
+    CHECK(IsPowerOfTwo(options.min_redzone));
+    CHECK(IsPowerOfTwo(options.max_redzone));
+  }
+
+  void SharedInitCode(const AllocatorOptions &options) {
+    CheckOptions(options);
+    quarantine.Init((uptr)options.quarantine_size_mb << 20,
+                    kMaxThreadLocalQuarantine);
+    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
+                 memory_order_release);
+    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
+    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
+  }
+
+  void Initialize(const AllocatorOptions &options) {
+    allocator.Init(options.may_return_null);
+    SharedInitCode(options);
+  }
+
+  void ReInitialize(const AllocatorOptions &options) {
+    allocator.SetMayReturnNull(options.may_return_null);
+    SharedInitCode(options);
+  }
+
+  void GetOptions(AllocatorOptions *options) const {
+    options->quarantine_size_mb = quarantine.GetSize() >> 20;
+    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+    options->may_return_null = allocator.MayReturnNull();
+    options->alloc_dealloc_mismatch =
+        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+  }
+
+  // -------------------- Helper methods. -------------------------
+  uptr ComputeRZLog(uptr user_requested_size) {
+    u32 rz_log =
+      user_requested_size <= 64        - 16   ? 0 :
+      user_requested_size <= 128       - 32   ? 1 :
+      user_requested_size <= 512       - 64   ? 2 :
+      user_requested_size <= 4096      - 128  ? 3 :
+      user_requested_size <= (1 << 14) - 256  ? 4 :
+      user_requested_size <= (1 << 15) - 512  ? 5 :
+      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
+    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
+    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+  }
+
+  // We have an address between two chunks, and we want to report just one.
+  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
+                         AsanChunk *right_chunk) {
+    // Prefer an allocated chunk over freed chunk and freed chunk
+    // over available chunk.
+    if (left_chunk->chunk_state != right_chunk->chunk_state) {
+      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+        return left_chunk;
+      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+        return right_chunk;
+      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+        return left_chunk;
+      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+        return right_chunk;
+    }
+    // Same chunk_state: choose based on offset.
+    sptr l_offset = 0, r_offset = 0;
+    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+    if (l_offset < r_offset)
+      return left_chunk;
+    return right_chunk;
+  }
+
+  // -------------------- Allocation/Deallocation routines ---------------
+  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+                 AllocType alloc_type, bool can_fill) {
+    if (UNLIKELY(!asan_inited))
+      AsanInitFromRtl();
+    Flags &fl = *flags();
+    CHECK(stack);
+    const uptr min_alignment = SHADOW_GRANULARITY;
+    if (alignment < min_alignment)
+      alignment = min_alignment;
+    if (size == 0) {
+      // We'd be happy to avoid allocating memory for zero-size requests, but
+      // some programs/tests depend on this behavior and assume that malloc
+      // would not return NULL even for zero-size allocations. Moreover, it
+      // looks like operator new should never return NULL, and results of
+      // consecutive "new" calls must be different even if the allocated size
+      // is zero.
+      size = 1;
+    }
+    CHECK(IsPowerOfTwo(alignment));
+    uptr rz_log = ComputeRZLog(size);
+    uptr rz_size = RZLog2Size(rz_log);
+    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
+    uptr needed_size = rounded_size + rz_size;
+    if (alignment > min_alignment)
+      needed_size += alignment;
+    bool using_primary_allocator = true;
+    // If we are allocating from the secondary allocator, there will be no
+    // automatic right redzone, so add the right redzone manually.
+    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+      needed_size += rz_size;
+      using_primary_allocator = false;
+    }
+    CHECK(IsAligned(needed_size, min_alignment));
+    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+      Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+             (void*)size);
+      return allocator.ReturnNullOrDie();
+    }
+
+    AsanThread *t = GetCurrentThread();
+    void *allocated;
+    bool check_rss_limit = true;
+    if (t) {
+      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+      allocated =
+          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+    } else {
+      SpinMutexLock l(&fallback_mutex);
+      AllocatorCache *cache = &fallback_allocator_cache;
+      allocated =
+          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+    }
+
+    if (!allocated)
+      return allocator.ReturnNullOrDie();
+
+    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
+      // Heap poisoning is enabled, but the allocator provides an unpoisoned
+      // chunk. This is possible if CanPoisonMemory() was false for some
+      // time, for example, due to flags()->start_deactivated.
+      // Anyway, poison the block before using it for anything else.
+      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+    }
+
+    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+    uptr alloc_end = alloc_beg + needed_size;
+    uptr beg_plus_redzone = alloc_beg + rz_size;
+    uptr user_beg = beg_plus_redzone;
+    if (!IsAligned(user_beg, alignment))
+      user_beg = RoundUpTo(user_beg, alignment);
+    uptr user_end = user_beg + size;
+    CHECK_LE(user_end, alloc_end);
+    uptr chunk_beg = user_beg - kChunkHeaderSize;
+    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+    m->alloc_type = alloc_type;
+    m->rz_log = rz_log;
+    u32 alloc_tid = t ? t->tid() : 0;
+    m->alloc_tid = alloc_tid;
+    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
+    m->free_tid = kInvalidTid;
+    m->from_memalign = user_beg != beg_plus_redzone;
+    if (alloc_beg != chunk_beg) {
+      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
+      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
+      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+    }
+    if (using_primary_allocator) {
+      CHECK(size);
+      m->user_requested_size = size;
+      CHECK(allocator.FromPrimary(allocated));
+    } else {
+      CHECK(!allocator.FromPrimary(allocated));
+      m->user_requested_size = SizeClassMap::kMaxSize;
+      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+      meta[0] = size;
+      meta[1] = chunk_beg;
+    }
+
+    m->alloc_context_id = StackDepotPut(*stack);
+
+    uptr size_rounded_down_to_granularity =
+        RoundDownTo(size, SHADOW_GRANULARITY);
+    // Unpoison the bulk of the memory region.
+    if (size_rounded_down_to_granularity)
+      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+    // Deal with the end of the region if size is not aligned to granularity.
+    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
+      u8 *shadow =
+          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
+      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+    }
+
+    AsanStats &thread_stats = GetCurrentThreadStats();
+    thread_stats.mallocs++;
+    thread_stats.malloced += size;
+    thread_stats.malloced_redzones += needed_size - size;
+    uptr class_id =
+        Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
+    thread_stats.malloced_by_size[class_id]++;
+    if (needed_size > SizeClassMap::kMaxSize)
+      thread_stats.malloc_large++;
+
+    void *res = reinterpret_cast<void *>(user_beg);
+    if (can_fill && fl.max_malloc_fill_size) {
+      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+    }
+#if CAN_SANITIZE_LEAKS
+    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
+                                                 : __lsan::kDirectlyLeaked;
+#endif
+    // Must be the last mutation of metadata in this function.
+    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+    ASAN_MALLOC_HOOK(res, size);
+    return res;
+  }
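+
+  // A worked example of the layout computed above (illustrative, assuming a
+  // 64-bit target with SHADOW_GRANULARITY == 8): a plain 100-byte malloc gets
+  // rz_size == 64, so needed_size == RoundUpTo(100, 8) + 64 == 168. user_beg
+  // lands 64 bytes past alloc_beg, the 16-byte chunk header sits immediately
+  // below user_beg, and kAllocBegMagic plus chunk_beg are written at
+  // alloc_beg so that GetAsanChunk() can locate the header later.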
+
+  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+                                   BufferedStackTrace *stack) {
+    u8 old_chunk_state = CHUNK_ALLOCATED;
+    // Flip the chunk_state atomically to avoid race on double-free.
+    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
+                                        CHUNK_QUARANTINE, memory_order_acquire))
+      ReportInvalidFree(ptr, old_chunk_state, stack);
+    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+  }
+
+  // Expects the chunk to already be marked as quarantined by using
+  // AtomicallySetQuarantineFlag.
+  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+                       AllocType alloc_type) {
+    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+
+    if (m->alloc_type != alloc_type) {
+      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+                                (AllocType)alloc_type);
+      }
+    }
+
+    CHECK_GE(m->alloc_tid, 0);
+    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
+      CHECK_EQ(m->free_tid, kInvalidTid);
+    AsanThread *t = GetCurrentThread();
+    m->free_tid = t ? t->tid() : 0;
+    m->free_context_id = StackDepotPut(*stack);
+    // Poison the region.
+    PoisonShadow(m->Beg(),
+                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+                 kAsanHeapFreeMagic);
+
+    AsanStats &thread_stats = GetCurrentThreadStats();
+    thread_stats.frees++;
+    thread_stats.freed += m->UsedSize();
+
+    // Push into quarantine.
+    if (t) {
+      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+      AllocatorCache *ac = GetAllocatorCache(ms);
+      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
+                           m->UsedSize());
+    } else {
+      SpinMutexLock l(&fallback_mutex);
+      AllocatorCache *ac = &fallback_allocator_cache;
+      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
+                           m->UsedSize());
+    }
+  }
+
+  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
+                  AllocType alloc_type) {
+    uptr p = reinterpret_cast<uptr>(ptr);
+    if (p == 0) return;
+
+    uptr chunk_beg = p - kChunkHeaderSize;
+    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+    if (delete_size && flags()->new_delete_type_mismatch &&
+        delete_size != m->UsedSize()) {
+      ReportNewDeleteSizeMismatch(p, delete_size, stack);
+    }
+    ASAN_FREE_HOOK(ptr);
+    // Must mark the chunk as quarantined before any changes to its metadata.
+    AtomicallySetQuarantineFlag(m, ptr, stack);
+    QuarantineChunk(m, ptr, stack, alloc_type);
+  }
+
+  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+    CHECK(old_ptr && new_size);
+    uptr p = reinterpret_cast<uptr>(old_ptr);
+    uptr chunk_beg = p - kChunkHeaderSize;
+    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+    AsanStats &thread_stats = GetCurrentThreadStats();
+    thread_stats.reallocs++;
+    thread_stats.realloced += new_size;
+
+    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
+    if (new_ptr) {
+      u8 chunk_state = m->chunk_state;
+      if (chunk_state != CHUNK_ALLOCATED)
+        ReportInvalidFree(old_ptr, chunk_state, stack);
+      CHECK_NE(REAL(memcpy), (void*)0);
+      uptr memcpy_size = Min(new_size, m->UsedSize());
+      // If realloc() races with free(), we may start copying freed memory.
+      // However, we will report racy double-free later anyway.
+      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
+    }
+    return new_ptr;
+  }
+
+  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+      return allocator.ReturnNullOrDie();
+    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+    // If the memory comes from the secondary allocator, there is no need to
+    // clear it, as it comes directly from mmap.
+    if (ptr && allocator.FromPrimary(ptr))
+      REAL(memset)(ptr, 0, nmemb * size);
+    return ptr;
+  }
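+
+  // Worked overflow example for the check above (illustrative, 32-bit uptr):
+  // with nmemb == 0x10000 and size == 0x10001 the product wraps to 0x10000,
+  // so without CallocShouldReturnNullDueToOverflow() the caller would receive
+  // a 64 KiB block while believing it had asked for roughly 4 GiB.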
+
+  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+    if (chunk_state == CHUNK_QUARANTINE)
+      ReportDoubleFree((uptr)ptr, stack);
+    else
+      ReportFreeNotMalloced((uptr)ptr, stack);
+  }
+
+  void CommitBack(AsanThreadLocalMallocStorage *ms) {
+    AllocatorCache *ac = GetAllocatorCache(ms);
+    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
+    allocator.SwallowCache(ac);
+  }
+
+  // -------------------------- Chunk lookup ----------------------
+
+  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+  AsanChunk *GetAsanChunk(void *alloc_beg) {
+    if (!alloc_beg) return 0;
+    if (!allocator.FromPrimary(alloc_beg)) {
+      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+      return m;
+    }
+    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
+    if (alloc_magic[0] == kAllocBegMagic)
+      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
+    return reinterpret_cast<AsanChunk *>(alloc_beg);
+  }
+
+  AsanChunk *GetAsanChunkByAddr(uptr p) {
+    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+    return GetAsanChunk(alloc_beg);
+  }
+
+  // Allocator must be locked when this function is called.
+  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+    void *alloc_beg =
+        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+    return GetAsanChunk(alloc_beg);
+  }
+
+  uptr AllocationSize(uptr p) {
+    AsanChunk *m = GetAsanChunkByAddr(p);
+    if (!m) return 0;
+    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+    if (m->Beg() != p) return 0;
+    return m->UsedSize();
+  }
+
+  AsanChunkView FindHeapChunkByAddress(uptr addr) {
+    AsanChunk *m1 = GetAsanChunkByAddr(addr);
+    if (!m1) return AsanChunkView(m1);
+    sptr offset = 0;
+    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+      // The address is in the chunk's left redzone, so maybe it is actually
+      // a right buffer overflow from the other chunk to the left.
+      // Search a bit to the left to see if there is another chunk.
+      AsanChunk *m2 = 0;
+      for (uptr l = 1; l < GetPageSizeCached(); l++) {
+        m2 = GetAsanChunkByAddr(addr - l);
+        if (m2 == m1) continue;  // Still the same chunk.
+        break;
+      }
+      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+        m1 = ChooseChunk(addr, m2, m1);
+    }
+    return AsanChunkView(m1);
+  }
+
+  void PrintStats() {
+    allocator.PrintStats();
+  }
+
+  void ForceLock() {
+    allocator.ForceLock();
+    fallback_mutex.Lock();
+  }
+
+  void ForceUnlock() {
+    fallback_mutex.Unlock();
+    allocator.ForceUnlock();
+  }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static AsanAllocator &get_allocator() {
+  return instance.allocator;
+}
+
+bool AsanChunkView::IsValid() {
+  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
+uptr AsanChunkView::Beg() { return chunk_->Beg(); }
+uptr AsanChunkView::End() { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+
+static StackTrace GetStackTraceFromId(u32 id) {
+  CHECK(id);
+  StackTrace res = StackDepotGet(id);
+  CHECK(res.trace);
+  return res;
+}
+
+StackTrace AsanChunkView::GetAllocStack() {
+  return GetStackTraceFromId(chunk_->alloc_context_id);
+}
+
+StackTrace AsanChunkView::GetFreeStack() {
+  return GetStackTraceFromId(chunk_->free_context_id);
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+  instance.Initialize(options);
+}
+
+void ReInitializeAllocator(const AllocatorOptions &options) {
+  instance.ReInitialize(options);
+}
+
+void GetAllocatorOptions(AllocatorOptions *options) {
+  instance.GetOptions(options);
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+  return instance.FindHeapChunkByAddress(addr);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+  instance.CommitBack(this);
+}
+
+void PrintInternalAllocatorStats() {
+  instance.PrintStats();
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+                    AllocType alloc_type) {
+  return instance.Allocate(size, alignment, stack, alloc_type, true);
+}
+
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+  instance.Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+                     AllocType alloc_type) {
+  instance.Deallocate(ptr, size, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
+  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+  return instance.Calloc(nmemb, size, stack);
+}
+
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+  if (p == 0)
+    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+  if (size == 0) {
+    instance.Deallocate(p, 0, stack, FROM_MALLOC);
+    return 0;
+  }
+  return instance.Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
+  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+}
+
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
+  uptr PageSize = GetPageSizeCached();
+  size = RoundUpTo(size, PageSize);
+  if (size == 0) {
+    // pvalloc(0) should allocate one page.
+    size = PageSize;
+  }
+  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        BufferedStackTrace *stack) {
+  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+  if (ptr == 0) return 0;
+  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+    GET_STACK_TRACE_FATAL(pc, bp);
+    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+  }
+  return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+}
+
+void asan_mz_force_lock() {
+  instance.ForceLock();
+}
+
+void asan_mz_force_unlock() {
+  instance.ForceUnlock();
+}
+
+void AsanSoftRssLimitExceededCallback(bool exceeded) {
+  instance.allocator.SetRssLimitIsExceeded(exceeded);
+}
+
+}  // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+  __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+  __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+  *begin = (uptr)&__asan::get_allocator();
+  *end = *begin + sizeof(__asan::get_allocator());
+}
+
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
+  if (!m) return 0;
+  uptr chunk = m->Beg();
+  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+    return 0;
+  if (m->AddrIsInside(addr, /*locked_version=*/true))
+    return chunk;
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+                                  addr))
+    return chunk;
+  return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
+  CHECK(m);
+  return m->Beg();
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+  return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+  return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+  m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+  return m->UsedSize(/*locked_version=*/true);
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+  return m->alloc_context_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __asan::get_allocator().ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
+  if (!m) return kIgnoreObjectInvalid;
+  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+    if (m->lsan_tag == kIgnored)
+      return kIgnoreObjectAlreadyIgnored;
+    m->lsan_tag = __lsan::kIgnored;
+    return kIgnoreObjectSuccess;
+  } else {
+    return kIgnoreObjectInvalid;
+  }
+}
+}  // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;  // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc here.
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+  return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+  uptr ptr = reinterpret_cast<uptr>(p);
+  return instance.AllocationSize(ptr) > 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+  if (p == 0) return 0;
+  uptr ptr = reinterpret_cast<uptr>(p);
+  uptr allocated_size = instance.AllocationSize(ptr);
+  // Die if p is not malloced or if it is already freed.
+  if (allocated_size == 0) {
+    GET_STACK_TRACE_FATAL_HERE;
+    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
+  }
+  return allocated_size;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+  (void)ptr;
+  (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+  (void)ptr;
+}
+}  // extern "C"
+#endif
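
The empty weak hooks above are only fallbacks: a program linked against ASan
can supply strong definitions of the same symbols to observe every event that
Allocate() and Deallocate() route through ASAN_MALLOC_HOOK/ASAN_FREE_HOOK. A
minimal sketch of such client code (the counters are hypothetical and not
thread-safe; uptr is sanitizer_common's pointer-sized unsigned integer):

  static unsigned long g_total_mallocs, g_total_frees;

  extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
    (void)ptr; (void)size;
    ++g_total_mallocs;  // runs right after Allocate() hands `ptr` to the user
  }

  extern "C" void __sanitizer_free_hook(void *ptr) {
    (void)ptr;
    ++g_total_frees;    // runs just before Deallocate() quarantines `ptr`
  }
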
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 6d3a992..3208d1f 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -9,12 +9,13 @@
 //
 // This file is a part of AddressSanitizer, an address sanity checker.
 //
-// ASan-private header for asan_allocator2.cc.
+// ASan-private header for asan_allocator.cc.
 //===----------------------------------------------------------------------===//
 
 #ifndef ASAN_ALLOCATOR_H
 #define ASAN_ALLOCATOR_H
 
+#include "asan_flags.h"
 #include "asan_internal.h"
 #include "asan_interceptors.h"
 #include "sanitizer_common/sanitizer_allocator.h"
@@ -31,8 +32,20 @@
 static const uptr kNumberOfSizeClasses = 255;
 struct AsanChunk;
 
-void InitializeAllocator();
-void ReInitializeAllocator();
+struct AllocatorOptions {
+  u32 quarantine_size_mb;
+  u16 min_redzone;
+  u16 max_redzone;
+  u8 may_return_null;
+  u8 alloc_dealloc_mismatch;
+
+  void SetFrom(const Flags *f, const CommonFlags *cf);
+  void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
 
 class AsanChunkView {
  public:
@@ -127,12 +140,12 @@
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
-    SecondaryAllocator> Allocator;
+    SecondaryAllocator> AsanAllocator;
 
 
 struct AsanThreadLocalMallocStorage {
   uptr quarantine_cache[16];
-  AllocatorCache allocator2_cache;
+  AllocatorCache allocator_cache;
   void CommitBack();
  private:
   // These objects are allocated via mmap() and are zero-initialized.
@@ -160,6 +173,7 @@
 void asan_mz_force_unlock();
 
 void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
 
 }  // namespace __asan
 #endif  // ASAN_ALLOCATOR_H
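
The new AllocatorOptions struct decouples allocator tuning from flag parsing:
values are gathered from the flag objects once and handed over explicitly. A
rough sketch of the intended call pattern (the call site below is illustrative;
the real callers are the ASan init and activation paths, which are outside this
hunk):

  AllocatorOptions opts;
  opts.SetFrom(flags(), common_flags());  // redzones, quarantine size, mismatch checks
  InitializeAllocator(opts);              // one-time setup of the allocator instance

  // Later, e.g. when activation delivers new flag values:
  opts.SetFrom(flags(), common_flags());
  ReInitializeAllocator(opts);            // re-applies only the tunable parameters
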
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
deleted file mode 100644
index 52bdcf6..0000000
--- a/lib/asan/asan_allocator2.cc
+++ /dev/null
@@ -1,792 +0,0 @@
-//===-- asan_allocator2.cc ------------------------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Implementation of ASan's memory allocator, 2-nd version.
-// This variant uses the allocator from sanitizer_common, i.e. the one shared
-// with ThreadSanitizer and MemorySanitizer.
-//
-//===----------------------------------------------------------------------===//
-#include "asan_allocator.h"
-
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
-
-namespace __asan {
-
-void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
-  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
-  // Statistics.
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.mmaps++;
-  thread_stats.mmaped += size;
-}
-void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
-  PoisonShadow(p, size, 0);
-  // We are about to unmap a chunk of user memory.
-  // Mark the corresponding shadow memory as not needed.
-  FlushUnneededASanShadowMemory(p, size);
-  // Statistics.
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.munmaps++;
-  thread_stats.munmaped += size;
-}
-
-// We can not use THREADLOCAL because it is not supported on some of the
-// platforms we care about (OSX 10.6, Android).
-// static THREADLOCAL AllocatorCache cache;
-AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
-  CHECK(ms);
-  return &ms->allocator2_cache;
-}
-
-static Allocator allocator;
-
-static const uptr kMaxAllowedMallocSize =
-  FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
-
-static const uptr kMaxThreadLocalQuarantine =
-  FIRST_32_SECOND_64(1 << 18, 1 << 20);
-
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
-enum {
-  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
-  CHUNK_ALLOCATED  = 2,
-  CHUNK_QUARANTINE = 3
-};
-
-// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
-// We use adaptive redzones: for larger allocation larger redzones are used.
-static u32 RZLog2Size(u32 rz_log) {
-  CHECK_LT(rz_log, 8);
-  return 16 << rz_log;
-}
-
-static u32 RZSize2Log(u32 rz_size) {
-  CHECK_GE(rz_size, 16);
-  CHECK_LE(rz_size, 2048);
-  CHECK(IsPowerOfTwo(rz_size));
-  u32 res = Log2(rz_size) - 4;
-  CHECK_EQ(rz_size, RZLog2Size(res));
-  return res;
-}
-
-static uptr ComputeRZLog(uptr user_requested_size) {
-  u32 rz_log =
-    user_requested_size <= 64        - 16   ? 0 :
-    user_requested_size <= 128       - 32   ? 1 :
-    user_requested_size <= 512       - 64   ? 2 :
-    user_requested_size <= 4096      - 128  ? 3 :
-    user_requested_size <= (1 << 14) - 256  ? 4 :
-    user_requested_size <= (1 << 15) - 512  ? 5 :
-    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
-  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
-             RZSize2Log(flags()->max_redzone));
-}
-
-// The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
-//   L -- left redzone words (0 or more bytes)
-//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
-//   U -- user memory.
-//   R -- right redzone (0 or more bytes)
-// ChunkBase consists of ChunkHeader and other bytes that overlap with user
-// memory.
-
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L L  H H U U U U U U
-//   |                    ^
-//   ---------------------|
-//   M -- magic value kAllocBegMagic
-//   B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
-  // 1-st 8 bytes.
-  u32 chunk_state       : 8;  // Must be first.
-  u32 alloc_tid         : 24;
-
-  u32 free_tid          : 24;
-  u32 from_memalign     : 1;
-  u32 alloc_type        : 2;
-  u32 rz_log            : 3;
-  u32 lsan_tag          : 2;
-  // 2-nd 8 bytes
-  // This field is used for small sizes. For large sizes it is equal to
-  // SizeClassMap::kMaxSize and the actual size is stored in the
-  // SecondaryAllocator's metadata.
-  u32 user_requested_size;
-  u32 alloc_context_id;
-};
-
-struct ChunkBase : ChunkHeader {
-  // Header2, intersects with user memory.
-  u32 free_context_id;
-};
-
-static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
-static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
-COMPILER_CHECK(kChunkHeaderSize == 16);
-COMPILER_CHECK(kChunkHeader2Size <= 16);
-
-struct AsanChunk: ChunkBase {
-  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
-  uptr UsedSize(bool locked_version = false) {
-    if (user_requested_size != SizeClassMap::kMaxSize)
-      return user_requested_size;
-    return *reinterpret_cast<uptr *>(
-                allocator.GetMetaData(AllocBeg(locked_version)));
-  }
-  void *AllocBeg(bool locked_version = false) {
-    if (from_memalign) {
-      if (locked_version)
-        return allocator.GetBlockBeginFastLocked(
-            reinterpret_cast<void *>(this));
-      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
-    }
-    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
-  }
-  bool AddrIsInside(uptr addr, bool locked_version = false) {
-    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
-  }
-};
-
-bool AsanChunkView::IsValid() {
-  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
-}
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-
-static StackTrace GetStackTraceFromId(u32 id) {
-  CHECK(id);
-  StackTrace res = StackDepotGet(id);
-  CHECK(res.trace);
-  return res;
-}
-
-StackTrace AsanChunkView::GetAllocStack() {
-  return GetStackTraceFromId(chunk_->alloc_context_id);
-}
-
-StackTrace AsanChunkView::GetFreeStack() {
-  return GetStackTraceFromId(chunk_->free_context_id);
-}
-
-struct QuarantineCallback;
-typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
-typedef AsanQuarantine::Cache QuarantineCache;
-static AsanQuarantine quarantine(LINKER_INITIALIZED);
-static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
-static AllocatorCache fallback_allocator_cache;
-static SpinMutex fallback_mutex;
-
-QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
-  CHECK(ms);
-  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
-  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
-}
-
-struct QuarantineCallback {
-  explicit QuarantineCallback(AllocatorCache *cache)
-      : cache_(cache) {
-  }
-
-  void Recycle(AsanChunk *m) {
-    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
-    CHECK_NE(m->alloc_tid, kInvalidTid);
-    CHECK_NE(m->free_tid, kInvalidTid);
-    PoisonShadow(m->Beg(),
-                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
-                 kAsanHeapLeftRedzoneMagic);
-    void *p = reinterpret_cast<void *>(m->AllocBeg());
-    if (p != m) {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
-      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
-      // Clear the magic value, as allocator internals may overwrite the
-      // contents of deallocated chunk, confusing GetAsanChunk lookup.
-      alloc_magic[0] = 0;
-      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
-    }
-
-    // Statistics.
-    AsanStats &thread_stats = GetCurrentThreadStats();
-    thread_stats.real_frees++;
-    thread_stats.really_freed += m->UsedSize();
-
-    allocator.Deallocate(cache_, p);
-  }
-
-  void *Allocate(uptr size) {
-    return allocator.Allocate(cache_, size, 1, false);
-  }
-
-  void Deallocate(void *p) {
-    allocator.Deallocate(cache_, p);
-  }
-
-  AllocatorCache *cache_;
-};
-
-void InitializeAllocator() {
-  allocator.Init();
-  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-void ReInitializeAllocator() {
-  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
-                      AllocType alloc_type, bool can_fill) {
-  if (UNLIKELY(!asan_inited))
-    AsanInitFromRtl();
-  Flags &fl = *flags();
-  CHECK(stack);
-  const uptr min_alignment = SHADOW_GRANULARITY;
-  if (alignment < min_alignment)
-    alignment = min_alignment;
-  if (size == 0) {
-    // We'd be happy to avoid allocating memory for zero-size requests, but
-    // some programs/tests depend on this behavior and assume that malloc would
-    // not return NULL even for zero-size allocations. Moreover, it looks like
-    // operator new should never return NULL, and results of consecutive "new"
-    // calls must be different even if the allocated size is zero.
-    size = 1;
-  }
-  CHECK(IsPowerOfTwo(alignment));
-  uptr rz_log = ComputeRZLog(size);
-  uptr rz_size = RZLog2Size(rz_log);
-  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
-  uptr needed_size = rounded_size + rz_size;
-  if (alignment > min_alignment)
-    needed_size += alignment;
-  bool using_primary_allocator = true;
-  // If we are allocating from the secondary allocator, there will be no
-  // automatic right redzone, so add the right redzone manually.
-  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
-    needed_size += rz_size;
-    using_primary_allocator = false;
-  }
-  CHECK(IsAligned(needed_size, min_alignment));
-  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
-    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
-           (void*)size);
-    return AllocatorReturnNull();
-  }
-
-  AsanThread *t = GetCurrentThread();
-  void *allocated;
-  if (t) {
-    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-    allocated = allocator.Allocate(cache, needed_size, 8, false);
-  } else {
-    SpinMutexLock l(&fallback_mutex);
-    AllocatorCache *cache = &fallback_allocator_cache;
-    allocated = allocator.Allocate(cache, needed_size, 8, false);
-  }
-
-  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
-    // Heap poisoning is enabled, but the allocator provides an unpoisoned
-    // chunk. This is possible if flags()->poison_heap was disabled for some
-    // time, for example, due to flags()->start_disabled.
-    // Anyway, poison the block before using it for anything else.
-    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
-    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
-  }
-
-  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
-  uptr alloc_end = alloc_beg + needed_size;
-  uptr beg_plus_redzone = alloc_beg + rz_size;
-  uptr user_beg = beg_plus_redzone;
-  if (!IsAligned(user_beg, alignment))
-    user_beg = RoundUpTo(user_beg, alignment);
-  uptr user_end = user_beg + size;
-  CHECK_LE(user_end, alloc_end);
-  uptr chunk_beg = user_beg - kChunkHeaderSize;
-  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-  m->alloc_type = alloc_type;
-  m->rz_log = rz_log;
-  u32 alloc_tid = t ? t->tid() : 0;
-  m->alloc_tid = alloc_tid;
-  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
-  m->free_tid = kInvalidTid;
-  m->from_memalign = user_beg != beg_plus_redzone;
-  if (alloc_beg != chunk_beg) {
-    CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
-    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
-    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
-  }
-  if (using_primary_allocator) {
-    CHECK(size);
-    m->user_requested_size = size;
-    CHECK(allocator.FromPrimary(allocated));
-  } else {
-    CHECK(!allocator.FromPrimary(allocated));
-    m->user_requested_size = SizeClassMap::kMaxSize;
-    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-    meta[0] = size;
-    meta[1] = chunk_beg;
-  }
-
-  m->alloc_context_id = StackDepotPut(*stack);
-
-  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
-  // Unpoison the bulk of the memory region.
-  if (size_rounded_down_to_granularity)
-    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
-  // Deal with the end of the region if size is not aligned to granularity.
-  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
-    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
-    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
-  }
-
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.mallocs++;
-  thread_stats.malloced += size;
-  thread_stats.malloced_redzones += needed_size - size;
-  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
-  thread_stats.malloced_by_size[class_id]++;
-  if (needed_size > SizeClassMap::kMaxSize)
-    thread_stats.malloc_large++;
-
-  void *res = reinterpret_cast<void *>(user_beg);
-  if (can_fill && fl.max_malloc_fill_size) {
-    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
-    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
-  }
-#if CAN_SANITIZE_LEAKS
-  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
-                                               : __lsan::kDirectlyLeaked;
-#endif
-  // Must be the last mutation of metadata in this function.
-  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
-  ASAN_MALLOC_HOOK(res, size);
-  return res;
-}
-
-static void ReportInvalidFree(void *ptr, u8 chunk_state,
-                              BufferedStackTrace *stack) {
-  if (chunk_state == CHUNK_QUARANTINE)
-    ReportDoubleFree((uptr)ptr, stack);
-  else
-    ReportFreeNotMalloced((uptr)ptr, stack);
-}
-
-static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
-                                        BufferedStackTrace *stack) {
-  u8 old_chunk_state = CHUNK_ALLOCATED;
-  // Flip the chunk_state atomically to avoid race on double-free.
-  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
-                                      CHUNK_QUARANTINE, memory_order_acquire))
-    ReportInvalidFree(ptr, old_chunk_state, stack);
-  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
-}
-
-// Expects the chunk to already be marked as quarantined by using
-// AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
-                            AllocType alloc_type) {
-  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-
-  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
-    ReportAllocTypeMismatch((uptr)ptr, stack,
-                            (AllocType)m->alloc_type, (AllocType)alloc_type);
-
-  CHECK_GE(m->alloc_tid, 0);
-  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
-    CHECK_EQ(m->free_tid, kInvalidTid);
-  AsanThread *t = GetCurrentThread();
-  m->free_tid = t ? t->tid() : 0;
-  m->free_context_id = StackDepotPut(*stack);
-  // Poison the region.
-  PoisonShadow(m->Beg(),
-               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
-               kAsanHeapFreeMagic);
-
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.frees++;
-  thread_stats.freed += m->UsedSize();
-
-  // Push into quarantine.
-  if (t) {
-    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
-    AllocatorCache *ac = GetAllocatorCache(ms);
-    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
-                   m, m->UsedSize());
-  } else {
-    SpinMutexLock l(&fallback_mutex);
-    AllocatorCache *ac = &fallback_allocator_cache;
-    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
-                   m, m->UsedSize());
-  }
-}
-
-static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
-                       AllocType alloc_type) {
-  uptr p = reinterpret_cast<uptr>(ptr);
-  if (p == 0) return;
-
-  uptr chunk_beg = p - kChunkHeaderSize;
-  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-  if (delete_size && flags()->new_delete_type_mismatch &&
-      delete_size != m->UsedSize()) {
-    ReportNewDeleteSizeMismatch(p, delete_size, stack);
-  }
-  ASAN_FREE_HOOK(ptr);
-  // Must mark the chunk as quarantined before any changes to its metadata.
-  AtomicallySetQuarantineFlag(m, ptr, stack);
-  QuarantineChunk(m, ptr, stack, alloc_type);
-}
-
-static void *Reallocate(void *old_ptr, uptr new_size,
-                        BufferedStackTrace *stack) {
-  CHECK(old_ptr && new_size);
-  uptr p = reinterpret_cast<uptr>(old_ptr);
-  uptr chunk_beg = p - kChunkHeaderSize;
-  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.reallocs++;
-  thread_stats.realloced += new_size;
-
-  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
-  if (new_ptr) {
-    u8 chunk_state = m->chunk_state;
-    if (chunk_state != CHUNK_ALLOCATED)
-      ReportInvalidFree(old_ptr, chunk_state, stack);
-    CHECK_NE(REAL(memcpy), (void*)0);
-    uptr memcpy_size = Min(new_size, m->UsedSize());
-    // If realloc() races with free(), we may start copying freed memory.
-    // However, we will report racy double-free later anyway.
-    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
-    Deallocate(old_ptr, 0, stack, FROM_MALLOC);
-  }
-  return new_ptr;
-}
-
-// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
-static AsanChunk *GetAsanChunk(void *alloc_beg) {
-  if (!alloc_beg) return 0;
-  if (!allocator.FromPrimary(alloc_beg)) {
-    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
-    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
-    return m;
-  }
-  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
-  if (alloc_magic[0] == kAllocBegMagic)
-    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
-  return reinterpret_cast<AsanChunk *>(alloc_beg);
-}
-
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
-  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
-  return GetAsanChunk(alloc_beg);
-}
-
-// Allocator must be locked when this function is called.
-static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
-  void *alloc_beg =
-      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
-  return GetAsanChunk(alloc_beg);
-}
-
-static uptr AllocationSize(uptr p) {
-  AsanChunk *m = GetAsanChunkByAddr(p);
-  if (!m) return 0;
-  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
-  if (m->Beg() != p) return 0;
-  return m->UsedSize();
-}
-
-// We have an address between two chunks, and we want to report just one.
-AsanChunk *ChooseChunk(uptr addr,
-                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
-  // Prefer an allocated chunk over freed chunk and freed chunk
-  // over available chunk.
-  if (left_chunk->chunk_state != right_chunk->chunk_state) {
-    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
-      return left_chunk;
-    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
-      return right_chunk;
-    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
-      return left_chunk;
-    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
-      return right_chunk;
-  }
-  // Same chunk_state: choose based on offset.
-  sptr l_offset = 0, r_offset = 0;
-  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
-  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
-  if (l_offset < r_offset)
-    return left_chunk;
-  return right_chunk;
-}
-
-AsanChunkView FindHeapChunkByAddress(uptr addr) {
-  AsanChunk *m1 = GetAsanChunkByAddr(addr);
-  if (!m1) return AsanChunkView(m1);
-  sptr offset = 0;
-  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
-    // The address is in the chunk's left redzone, so maybe it is actually
-    // a right buffer overflow from the other chunk to the left.
-    // Search a bit to the left to see if there is another chunk.
-    AsanChunk *m2 = 0;
-    for (uptr l = 1; l < GetPageSizeCached(); l++) {
-      m2 = GetAsanChunkByAddr(addr - l);
-      if (m2 == m1) continue;  // Still the same chunk.
-      break;
-    }
-    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
-      m1 = ChooseChunk(addr, m2, m1);
-  }
-  return AsanChunkView(m1);
-}
-
-void AsanThreadLocalMallocStorage::CommitBack() {
-  AllocatorCache *ac = GetAllocatorCache(this);
-  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
-  allocator.SwallowCache(GetAllocatorCache(this));
-}
-
-void PrintInternalAllocatorStats() {
-  allocator.PrintStats();
-}
-
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
-                    AllocType alloc_type) {
-  return Allocate(size, alignment, stack, alloc_type, true);
-}
-
-void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
-  Deallocate(ptr, 0, stack, alloc_type);
-}
-
-void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
-                     AllocType alloc_type) {
-  Deallocate(ptr, size, stack, alloc_type);
-}
-
-void *asan_malloc(uptr size, BufferedStackTrace *stack) {
-  return Allocate(size, 8, stack, FROM_MALLOC, true);
-}
-
-void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return AllocatorReturnNull();
-  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
-  // If the memory comes from the secondary allocator no need to clear it
-  // as it comes directly from mmap.
-  if (ptr && allocator.FromPrimary(ptr))
-    REAL(memset)(ptr, 0, nmemb * size);
-  return ptr;
-}
-
-void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
-  if (p == 0)
-    return Allocate(size, 8, stack, FROM_MALLOC, true);
-  if (size == 0) {
-    Deallocate(p, 0, stack, FROM_MALLOC);
-    return 0;
-  }
-  return Reallocate(p, size, stack);
-}
-
-void *asan_valloc(uptr size, BufferedStackTrace *stack) {
-  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
-}
-
-void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
-  uptr PageSize = GetPageSizeCached();
-  size = RoundUpTo(size, PageSize);
-  if (size == 0) {
-    // pvalloc(0) should allocate one page.
-    size = PageSize;
-  }
-  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
-}
-
-int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
-                        BufferedStackTrace *stack) {
-  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
-  CHECK(IsAligned((uptr)ptr, alignment));
-  *memptr = ptr;
-  return 0;
-}
-
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
-  if (ptr == 0) return 0;
-  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
-  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
-    GET_STACK_TRACE_FATAL(pc, bp);
-    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
-  }
-  return usable_size;
-}
-
-uptr asan_mz_size(const void *ptr) {
-  return AllocationSize(reinterpret_cast<uptr>(ptr));
-}
-
-void asan_mz_force_lock() {
-  allocator.ForceLock();
-  fallback_mutex.Lock();
-}
-
-void asan_mz_force_unlock() {
-  fallback_mutex.Unlock();
-  allocator.ForceUnlock();
-}
-
-}  // namespace __asan
-
-// --- Implementation of LSan-specific functions --- {{{1
-namespace __lsan {
-void LockAllocator() {
-  __asan::allocator.ForceLock();
-}
-
-void UnlockAllocator() {
-  __asan::allocator.ForceUnlock();
-}
-
-void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
-  *begin = (uptr)&__asan::allocator;
-  *end = *begin + sizeof(__asan::allocator);
-}
-
-uptr PointsIntoChunk(void* p) {
-  uptr addr = reinterpret_cast<uptr>(p);
-  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
-  if (!m) return 0;
-  uptr chunk = m->Beg();
-  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
-    return 0;
-  if (m->AddrIsInside(addr, /*locked_version=*/true))
-    return chunk;
-  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
-                                  addr))
-    return chunk;
-  return 0;
-}
-
-uptr GetUserBegin(uptr chunk) {
-  __asan::AsanChunk *m =
-      __asan::GetAsanChunkByAddrFastLocked(chunk);
-  CHECK(m);
-  return m->Beg();
-}
-
-LsanMetadata::LsanMetadata(uptr chunk) {
-  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
-}
-
-bool LsanMetadata::allocated() const {
-  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->chunk_state == __asan::CHUNK_ALLOCATED;
-}
-
-ChunkTag LsanMetadata::tag() const {
-  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return static_cast<ChunkTag>(m->lsan_tag);
-}
-
-void LsanMetadata::set_tag(ChunkTag value) {
-  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  m->lsan_tag = value;
-}
-
-uptr LsanMetadata::requested_size() const {
-  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->UsedSize(/*locked_version=*/true);
-}
-
-u32 LsanMetadata::stack_trace_id() const {
-  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->alloc_context_id;
-}
-
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-  __asan::allocator.ForEachChunk(callback, arg);
-}
-
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
-  uptr addr = reinterpret_cast<uptr>(p);
-  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
-  if (!m) return kIgnoreObjectInvalid;
-  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
-    if (m->lsan_tag == kIgnored)
-      return kIgnoreObjectAlreadyIgnored;
-    m->lsan_tag = __lsan::kIgnored;
-    return kIgnoreObjectSuccess;
-  } else {
-    return kIgnoreObjectInvalid;
-  }
-}
-}  // namespace __lsan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan;  // NOLINT
-
-// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __sanitizer_get_estimated_allocated_size(uptr size) {
-  return size;
-}
-
-int __sanitizer_get_ownership(const void *p) {
-  uptr ptr = reinterpret_cast<uptr>(p);
-  return (AllocationSize(ptr) > 0);
-}
-
-uptr __sanitizer_get_allocated_size(const void *p) {
-  if (p == 0) return 0;
-  uptr ptr = reinterpret_cast<uptr>(p);
-  uptr allocated_size = AllocationSize(ptr);
-  // Die if p is not malloced or if it is already freed.
-  if (allocated_size == 0) {
-    GET_STACK_TRACE_FATAL_HERE;
-    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
-  }
-  return allocated_size;
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
-  (void)ptr;
-  (void)size;
-}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
-  (void)ptr;
-}
-}  // extern "C"
-#endif
diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc
index 2b66dd5..6fc5b69 100644
--- a/lib/asan/asan_debugging.cc
+++ b/lib/asan/asan_debugging.cc
@@ -81,8 +81,8 @@
   GetInfoForHeapAddress(addr, descr);
 }
 
-uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
-                  bool alloc_stack) {
+static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+                         bool alloc_stack) {
   AsanChunkView chunk = FindHeapChunkByAddress(addr);
   if (!chunk.IsValid()) return 0;
 
diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc
index c7f13c7..bf4f1eb 100644
--- a/lib/asan/asan_fake_stack.cc
+++ b/lib/asan/asan_fake_stack.cc
@@ -27,8 +27,10 @@
   CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
   u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
   if (class_id <= 6) {
-    for (uptr i = 0; i < (1U << class_id); i++)
+    for (uptr i = 0; i < (1U << class_id); i++) {
       shadow[i] = magic;
+      SanitizerBreakOptimization(0);  // Make sure this does not become memset.
+    }
   } else {
     // The size class is too big, it's cheaper to poison only size bytes.
     PoisonShadow(ptr, size, static_cast<u8>(magic));
@@ -58,7 +60,7 @@
 
 void FakeStack::Destroy(int tid) {
   PoisonAll(0);
-  if (common_flags()->verbosity >= 2) {
+  if (Verbosity() >= 2) {
     InternalScopedString str(kNumberOfSizeClasses * 50);
     for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
       str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
@@ -190,20 +192,19 @@
   return GetFakeStack();
 }
 
-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
   FakeStack *fs = GetFakeStackFast();
-  if (!fs) return real_stack;
+  if (!fs) return 0;
+  uptr local_stack;
+  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
   FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
-  if (!ff)
-    return real_stack;  // Out of fake stack, return the real one.
+  if (!ff) return 0;  // Out of fake stack.
   uptr ptr = reinterpret_cast<uptr>(ff);
   SetShadow(ptr, size, class_id, 0);
   return ptr;
 }
 
-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
-  if (ptr == real_stack)
-    return;
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
   FakeStack::Deallocate(ptr, class_id);
   SetShadow(ptr, size, class_id, kMagic8);
 }
@@ -214,12 +215,12 @@
 using namespace __asan;
 #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                       \
   extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                                \
-  __asan_stack_malloc_##class_id(uptr size, uptr real_stack) {                 \
-    return OnMalloc(class_id, size, real_stack);                               \
+      __asan_stack_malloc_##class_id(uptr size) {                              \
+    return OnMalloc(class_id, size);                                           \
   }                                                                            \
   extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id(  \
-      uptr ptr, uptr size, uptr real_stack) {                                  \
-    OnFree(ptr, class_id, size, real_stack);                                   \
+      uptr ptr, uptr size) {                                                   \
+    OnFree(ptr, class_id, size);                                               \
   }
 
 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
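
With the real_stack parameter gone, __asan_stack_malloc_N now reports failure
by returning 0 and the instrumented function simply keeps its ordinary frame.
Roughly what generated code is expected to do with this interface (a
hand-written sketch, not actual compiler output; kFrameSize and class id 1 are
placeholders):

  uptr fake_frame = __asan_stack_malloc_1(kFrameSize);
  uptr frame = fake_frame ? fake_frame
                          : reinterpret_cast<uptr>(__builtin_alloca(kFrameSize));
  // ... the function body addresses its locals relative to `frame` ...
  if (fake_frame)
    __asan_stack_free_1(fake_frame, kFrameSize);  // real frames need no call
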
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
new file mode 100644
index 0000000..efb7767
--- /dev/null
+++ b/lib/asan/asan_flags.cc
@@ -0,0 +1,160 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "asan_stack.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __asan {
+
+Flags asan_flags_dont_use_directly;  // use via flags().
+
+static const char *MaybeCallAsanDefaultOptions() {
+  return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
+#ifdef ASAN_DEFAULT_OPTIONS
+// Stringize the macro value.
+# define ASAN_STRINGIZE(x) #x
+# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
+  return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
+#else
+  return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void InitializeFlags() {
+  // Set the default values and prepare for parsing ASan and common flags.
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.detect_leaks = CAN_SANITIZE_LEAKS;
+    cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
+    cf.malloc_context_size = kDefaultMallocContextSize;
+    cf.intercept_tls_get_addr = true;
+    OverrideCommonFlags(cf);
+  }
+  Flags *f = flags();
+  f->SetDefaults();
+
+  FlagParser asan_parser;
+  RegisterAsanFlags(&asan_parser, f);
+  RegisterCommonFlags(&asan_parser);
+
+  // Set the default values and prepare for parsing LSan flags (which can also
+  // overwrite common flags).
+#if CAN_SANITIZE_LEAKS
+  __lsan::Flags *lf = __lsan::flags();
+  lf->SetDefaults();
+
+  FlagParser lsan_parser;
+  __lsan::RegisterLsanFlags(&lsan_parser, lf);
+  RegisterCommonFlags(&lsan_parser);
+#endif
+
+  // Override from ASan compile definition.
+  const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
+  asan_parser.ParseString(asan_compile_def);
+
+  // Override from the string returned by the user's __asan_default_options().
+  const char *asan_default_options = MaybeCallAsanDefaultOptions();
+  asan_parser.ParseString(asan_default_options);
+
+  // Override from the ASAN_OPTIONS / LSAN_OPTIONS environment variables.
+  asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
+#if CAN_SANITIZE_LEAKS
+  lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
+#endif
+
+  // Let activation flags override current settings. On Android they come
+  // from a system property. On other platforms this is a no-op.
+  if (!flags()->start_deactivated) {
+    char buf[100];
+    GetExtraActivationFlags(buf, sizeof(buf));
+    asan_parser.ParseString(buf);
+  }
+
+  SetVerbosity(common_flags()->verbosity);
+
+  // TODO(eugenis): dump all flags at verbosity>=2?
+  if (Verbosity()) ReportUnrecognizedFlags();
+
+  if (common_flags()->help) {
+    // TODO(samsonov): print all of the flags (ASan, LSan, common).
+    asan_parser.PrintFlagDescriptions();
+  }
+
+  // Flag validation:
+  if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+    Report("%s: detect_leaks is not supported on this platform.\n",
+           SanitizerToolName);
+    Die();
+  }
+  // Make "strict_init_order" imply "check_initialization_order".
+  // TODO(samsonov): Use a single runtime flag for an init-order checker.
+  if (f->strict_init_order) {
+    f->check_initialization_order = true;
+  }
+  CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+  CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
+  CHECK_GE(f->redzone, 16);
+  CHECK_GE(f->max_redzone, f->redzone);
+  CHECK_LE(f->max_redzone, 2048);
+  CHECK(IsPowerOfTwo(f->redzone));
+  CHECK(IsPowerOfTwo(f->max_redzone));
+
+  // quarantine_size is deprecated but we still honor it.
+  // quarantine_size cannot be used together with quarantine_size_mb.
+  if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
+    Report("%s: please use either 'quarantine_size' (deprecated) or "
+           "quarantine_size_mb, but not both\n", SanitizerToolName);
+    Die();
+  }
+  if (f->quarantine_size >= 0)
+    f->quarantine_size_mb = f->quarantine_size >> 20;
+  if (f->quarantine_size_mb < 0) {
+    const int kDefaultQuarantineSizeMb =
+        (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+    f->quarantine_size_mb = kDefaultQuarantineSizeMb;
+  }
+}
+
+}  // namespace __asan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+}  // extern "C"
+#endif
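For reference, InitializeFlags() above layers its sources in a fixed order: built-in defaults, the ASAN_DEFAULT_OPTIONS compile definition, the weak __asan_default_options() hook, the ASAN_OPTIONS environment variable, and finally the activation flags. A minimal sketch of the hook as an instrumented program might define it (the option values are arbitrary examples, not defaults):

// Example only -- compiled into the instrumented program, not into the runtime.
extern "C" const char *__asan_default_options() {
  // Flag names come from asan_flags.inc / the common sanitizer flags; the
  // values here are purely illustrative.
  return "verbosity=1:detect_stack_use_after_return=1";
}

ASAN_OPTIONS still wins over anything returned here, because the environment variable is parsed later in InitializeFlags().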
diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h
index 3df4dd3..4935161 100644
--- a/lib/asan/asan_flags.h
+++ b/lib/asan/asan_flags.h
@@ -16,6 +16,7 @@
 #define ASAN_FLAGS_H
 
 #include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
 
 // ASan flag values can be defined in four ways:
 // 1) initialized with default values at startup.
@@ -24,55 +25,24 @@
 // 3) overriden from string returned by user-specified function
 //    __asan_default_options().
 // 4) overriden from env variable ASAN_OPTIONS.
+// 5) overriden during ASan activation (for now used on Android only).
 
 namespace __asan {
 
 struct Flags {
-  // Flag descriptions are in asan_rtl.cc.
-  int  quarantine_size;
-  int  redzone;
-  int  max_redzone;
-  bool debug;
-  int  report_globals;
-  bool check_initialization_order;
-  bool replace_str;
-  bool replace_intrin;
-  bool mac_ignore_invalid_free;
-  bool detect_stack_use_after_return;
-  int min_uar_stack_size_log;
-  int max_uar_stack_size_log;
-  bool uar_noreserve;
-  int max_malloc_fill_size, malloc_fill_byte;
-  int  exitcode;
-  bool allow_user_poisoning;
-  int  sleep_before_dying;
-  bool check_malloc_usable_size;
-  bool unmap_shadow_on_exit;
-  bool abort_on_error;
-  bool print_stats;
-  bool print_legend;
-  bool atexit;
-  bool allow_reexec;
-  bool print_full_thread_history;
-  bool poison_heap;
-  bool poison_partial;
-  bool poison_array_cookie;
-  bool alloc_dealloc_mismatch;
-  bool new_delete_type_mismatch;
-  bool strict_memcmp;
-  bool strict_init_order;
-  bool start_deactivated;
-  int detect_invalid_pointer_pairs;
-  bool detect_container_overflow;
-  int detect_odr_violation;
-  bool dump_instruction_bytes;
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+
+  void SetDefaults();
 };
 
 extern Flags asan_flags_dont_use_directly;
 inline Flags *flags() {
   return &asan_flags_dont_use_directly;
 }
-void InitializeFlags(Flags *f, const char *env);
+
+void InitializeFlags();
 
 }  // namespace __asan
 
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
new file mode 100644
index 0000000..53a8a40
--- /dev/null
+++ b/lib/asan/asan_flags.inc
@@ -0,0 +1,145 @@
+//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_FLAG
+# error "Define ASAN_FLAG prior to including this file!"
+#endif
+
+// ASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_FLAG(int, quarantine_size, -1,
+            "Deprecated, please use quarantine_size_mb.")
+ASAN_FLAG(int, quarantine_size_mb, -1,
+          "Size (in Mb) of quarantine used to detect use-after-free "
+          "errors. Lower value may reduce memory usage but increase the "
+          "chance of false negatives.")
+ASAN_FLAG(int, redzone, 16,
+          "Minimal size (in bytes) of redzones around heap objects. "
+          "Requirement: redzone >= 16, is a power of two.")
+ASAN_FLAG(int, max_redzone, 2048,
+          "Maximal size (in bytes) of redzones around heap objects.")
+ASAN_FLAG(
+    bool, debug, false,
+    "If set, prints some debugging information and does additional checks.")
+ASAN_FLAG(
+    int, report_globals, 1,
+    "Controls the way to handle globals (0 - don't detect buffer overflow on "
+    "globals, 1 - detect buffer overflow, 2 - print data about registered "
+    "globals).")
+ASAN_FLAG(bool, check_initialization_order, false,
+          "If set, attempts to catch initialization order issues.")
+ASAN_FLAG(
+    bool, replace_str, true,
+    "If set, uses custom wrappers and replacements for libc string functions "
+    "to find more errors.")
+ASAN_FLAG(bool, replace_intrin, true,
+          "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
+ASAN_FLAG(bool, mac_ignore_invalid_free, false,
+          "Ignore invalid free() calls to work around some bugs. Used on OS X "
+          "only.")
+ASAN_FLAG(bool, detect_stack_use_after_return, false,
+          "Enables stack-use-after-return checking at run-time.")
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+          "Minimum fake stack size log.")
+ASAN_FLAG(int, max_uar_stack_size_log,
+          20, // 1Mb per size class, i.e. ~11Mb per thread
+          "Maximum fake stack size log.")
+ASAN_FLAG(bool, uar_noreserve, false,
+          "Use mmap with 'noreserve' flag to allocate fake stack.")
+ASAN_FLAG(
+    int, max_malloc_fill_size, 0x1000,  // By default, fill only the first 4K.
+    "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+    "bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(int, malloc_fill_byte, 0xbe,
+          "Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, exitcode, ASAN_DEFAULT_FAILURE_EXITCODE,
+          "Override the program exit status if the tool found an error.")
+ASAN_FLAG(bool, allow_user_poisoning, true,
+          "If set, user may manually mark memory regions as poisoned or "
+          "unpoisoned.")
+ASAN_FLAG(
+    int, sleep_before_dying, 0,
+    "Number of seconds to sleep between printing an error report and "
+    "terminating the program. Useful for debugging purposes (e.g. when one "
+    "needs to attach gdb).")
+ASAN_FLAG(bool, check_malloc_usable_size, true,
+          "Allows the users to work around the bug in Nvidia drivers prior to "
+          "295.*.")
+ASAN_FLAG(bool, unmap_shadow_on_exit, false,
+          "If set, explicitly unmaps the (huge) shadow at exit.")
+ASAN_FLAG(
+    bool, abort_on_error, false,
+    "If set, the tool calls abort() instead of _exit() after printing the "
+    "error report.")
+ASAN_FLAG(bool, print_stats, false,
+          "Print various statistics after printing an error message or if "
+          "atexit=1.")
+ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, atexit, false,
+          "If set, prints ASan exit stats even after program terminates "
+          "successfully.")
+ASAN_FLAG(
+    bool, print_full_thread_history, true,
+    "If set, prints thread creation stacks for the threads involved in the "
+    "report and their ancestors up to the main thread.")
+ASAN_FLAG(
+    bool, poison_heap, true,
+    "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+    "for benchmarking the allocator or instrumentator.")
+ASAN_FLAG(bool, poison_partial, true,
+          "If true, poison partially addressable 8-byte aligned words "
+          "(default=true). This flag affects heap and global buffers, but not "
+          "stack buffers.")
+ASAN_FLAG(bool, poison_array_cookie, true,
+          "Poison (or not) the array cookie after operator new[].")
+
+// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=131
+// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+// TODO(glider,timurrrr): Fix known issues and enable this back.
+ASAN_FLAG(bool, alloc_dealloc_mismatch,
+          (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+          "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+ASAN_FLAG(bool, new_delete_type_mismatch, true,
+          "Report errors on mismatch betwen size of new and delete.")
+ASAN_FLAG(bool, strict_memcmp, true,
+          "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+          "comparing p1 and p2.")
+ASAN_FLAG(
+    bool, strict_init_order, false,
+    "If true, assume that dynamic initializers can never access globals from "
+    "other modules, even if the latter are already initialized.")
+ASAN_FLAG(
+    bool, start_deactivated, false,
+    "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+    "poisoning) to reduce memory consumption as much as possible, and "
+    "restores them to original values when the first instrumented module is "
+    "loaded into the process. This is mainly intended to be used on "
+    "Android. ")
+ASAN_FLAG(
+    int, detect_invalid_pointer_pairs, 0,
+    "If non-zero, try to detect operations like <, <=, >, >= and - on "
+    "invalid pointer pairs (e.g. when pointers belong to different objects). "
+    "The bigger the value the harder we try.")
+ASAN_FLAG(
+    bool, detect_container_overflow, true,
+    "If true, honor the container overflow  annotations. "
+    "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
+ASAN_FLAG(int, detect_odr_violation, 2,
+          "If >=2, detect violation of One-Definition-Rule (ODR); "
+          "If ==1, detect ODR-violation only if the two variables "
+          "have different sizes")
+ASAN_FLAG(bool, dump_instruction_bytes, false,
+          "If true, dump 16 bytes starting at the instruction that caused SEGV")
+ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
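The .inc list above is expanded three times through the ASAN_FLAG macro: into struct fields (asan_flags.h), into default assignments (Flags::SetDefaults), and into parser registrations (RegisterAsanFlags). A self-contained sketch of the same X-macro idiom, using a hypothetical two-entry table in a single file instead of a separate .inc:

#include <cstdio>

// Hypothetical flag table; asan_flags.inc plays this role for ASan.
#define EXAMPLE_FLAGS(X)                      \
  X(int, redzone, 16, "Minimal redzone size") \
  X(bool, debug, false, "Extra debug checks")

struct ExampleFlags {
#define DECLARE_FLAG(Type, Name, Default, Desc) Type Name;
  EXAMPLE_FLAGS(DECLARE_FLAG)
#undef DECLARE_FLAG

  void SetDefaults() {
#define SET_FLAG(Type, Name, Default, Desc) Name = Default;
    EXAMPLE_FLAGS(SET_FLAG)
#undef SET_FLAG
  }
};

int main() {
  ExampleFlags f;
  f.SetDefaults();
  std::printf("redzone=%d debug=%d\n", f.redzone, static_cast<int>(f.debug));
  return 0;
}

The runtime itself uses the redefine-then-#include variant of this idiom, which keeps the flag list in one place while each includer chooses the expansion.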
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index be111d4..853a181 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -18,6 +18,7 @@
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
+#include "asan_suppressions.h"
 #include "asan_thread.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_mutex.h"
@@ -158,13 +159,14 @@
       // the entire redzone of the second global may be within the first global.
       for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
         if (g->beg == l->g->beg &&
-            (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
+            (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+            !IsODRViolationSuppressed(g->name))
           ReportODRViolation(g, FindRegistrationSite(g),
                              l->g, FindRegistrationSite(l->g));
       }
     }
   }
-  if (flags()->poison_heap)
+  if (CanPoisonMemory())
     PoisonRedZones(*g);
   ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
   l->g = g;
@@ -182,11 +184,13 @@
 
 static void UnregisterGlobal(const Global *g) {
   CHECK(asan_inited);
+  if (flags()->report_globals >= 2)
+    ReportGlobal(*g, "Removed");
   CHECK(flags()->report_globals);
   CHECK(AddrIsInMem(g->beg));
   CHECK(AddrIsAlignedByGranularity(g->beg));
   CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
-  if (flags()->poison_heap)
+  if (CanPoisonMemory())
     PoisonShadowForGlobal(g, 0);
   // We unpoison the shadow memory for the global but we do not remove it from
   // the list because that would require O(n^2) time with the current list
@@ -208,6 +212,20 @@
   }
 }
 
+#if SANITIZER_WINDOWS  // Should only be called on Windows.
+SANITIZER_INTERFACE_ATTRIBUTE
+void UnregisterGlobalsInRange(void *beg, void *end) {
+  if (!flags()->report_globals)
+    return;
+  BlockingMutexLock lock(&mu_for_globals);
+  for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+    void *address = (void *)l->g->beg;
+    if (beg <= address && address < end)
+      UnregisterGlobal(l->g);
+  }
+}
+#endif
+
 }  // namespace __asan
 
 // ---------------------- Interface ---------------- {{{1
@@ -216,7 +234,7 @@
 // Register an array of globals.
 void __asan_register_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  GET_STACK_TRACE_FATAL_HERE;
+  GET_STACK_TRACE_MALLOC;
   u32 stack_id = StackDepotPut(stack);
   BlockingMutexLock lock(&mu_for_globals);
   if (!global_registration_site_vector)
@@ -249,7 +267,7 @@
 // initializer can only touch global variables in the same TU.
 void __asan_before_dynamic_init(const char *module_name) {
   if (!flags()->check_initialization_order ||
-      !flags()->poison_heap)
+      !CanPoisonMemory())
     return;
   bool strict_init_order = flags()->strict_init_order;
   CHECK(dynamic_init_globals);
@@ -275,7 +293,7 @@
 // TU are poisoned.  It simply unpoisons all dynamically initialized globals.
 void __asan_after_dynamic_init() {
   if (!flags()->check_initialization_order ||
-      !flags()->poison_heap)
+      !CanPoisonMemory())
     return;
   CHECK(asan_inited);
   BlockingMutexLock lock(&mu_for_globals);
diff --git a/lib/asan/asan_init_version.h b/lib/asan/asan_init_version.h
index 77aea81..6cf57c4 100644
--- a/lib/asan/asan_init_version.h
+++ b/lib/asan/asan_init_version.h
@@ -25,8 +25,10 @@
   //         contains the function PC as the 3-rd field (see
   //         DescribeAddressIfStack).
   // v3=>v4: added '__asan_global_source_location' to __asan_global.
-  #define __asan_init __asan_init_v4
-  #define __asan_init_name "__asan_init_v4"
+  // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+  //         __asan_stack_free_ functions.
+  #define __asan_init __asan_init_v5
+  #define __asan_init_name "__asan_init_v5"
 }
 
 #endif  // ASAN_INIT_VERSION_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index deac034..df57696 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -20,6 +20,7 @@
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
+#include "asan_suppressions.h"
 #include "sanitizer_common/sanitizer_libc.h"
 
 namespace __asan {
@@ -34,12 +35,16 @@
   return false;
 }
 
+struct AsanInterceptorContext {
+  const char *interceptor_name;
+};
+
 // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
 // and ASAN_WRITE_RANGE as macro instead of function so
 // that no extra frames are created, and stack trace contains
 // relevant information only.
 // We check all shadow bytes.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do {                 \
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do {            \
     uptr __offset = (uptr)(offset);                                     \
     uptr __size = (uptr)(size);                                         \
     uptr __bad = 0;                                                     \
@@ -49,13 +54,26 @@
     }                                                                   \
     if (!QuickCheckForUnpoisonedRegion(__offset, __size) &&             \
         (__bad = __asan_region_is_poisoned(__offset, __size))) {        \
-      GET_CURRENT_PC_BP_SP;                                             \
-      __asan_report_error(pc, bp, sp, __bad, isWrite, __size);          \
+      AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx;     \
+      bool suppressed = false;                                          \
+      if (_ctx) {                                                       \
+        suppressed = IsInterceptorSuppressed(_ctx->interceptor_name);   \
+        if (!suppressed && HaveStackTraceBasedSuppressions()) {         \
+          GET_STACK_TRACE_FATAL_HERE;                                   \
+          suppressed = IsStackTraceSuppressed(&stack);                  \
+        }                                                               \
+      }                                                                 \
+      if (!suppressed) {                                                \
+        GET_CURRENT_PC_BP_SP;                                           \
+        __asan_report_error(pc, bp, sp, __bad, isWrite, __size);        \
+      }                                                                 \
     }                                                                   \
   } while (0)
 
-#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
-#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
+#define ASAN_READ_RANGE(ctx, offset, size) \
+  ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+  ACCESS_MEMORY_RANGE(ctx, offset, size, true)
 
 // Behavior of functions like "memcpy" or "strcpy" is undefined
 // if memory intervals overlap. We report error in this case.
@@ -113,20 +131,28 @@
 #define ASAN_INTERCEPT_FUNC(name)
 #endif  // SANITIZER_MAC
 
+#define ASAN_INTERCEPTOR_ENTER(ctx, func)                                      \
+  AsanInterceptorContext _ctx = {#func};                                       \
+  ctx = (void *)&_ctx;                                                         \
+  (void) ctx;                                                                  \
+
 #define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
-  ASAN_WRITE_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
+  ASAN_WRITE_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+  ASAN_READ_RANGE(ctx, ptr, size)
 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)                               \
+  ASAN_INTERCEPTOR_ENTER(ctx, func);                                           \
   do {                                                                         \
     if (asan_init_is_running)                                                  \
       return REAL(func)(__VA_ARGS__);                                          \
-    ctx = 0;                                                                   \
-    (void) ctx;                                                                \
     if (SANITIZER_MAC && UNLIKELY(!asan_inited))                               \
       return REAL(func)(__VA_ARGS__);                                          \
     ENSURE_ASAN_INITED();                                                      \
   } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+  do {                                            \
+  } while (false)
 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
   do {                                         \
   } while (false)
@@ -145,14 +171,23 @@
   do {                                                         \
   } while (false)
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+// Strict init-order checking is dlopen-hostile:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=178
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag)                           \
+  if (flags()->strict_init_order) {                                            \
+    StopInitOrderChecking();                                                   \
+  }
 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+  CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
 #include "sanitizer_common/sanitizer_common_interceptors.inc"
 
-#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
-#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
+// Syscall interceptors don't have contexts, so we don't support suppressions
+// for them.
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
   do {                                       \
     (void)(p);                               \
@@ -165,46 +200,79 @@
   } while (false)
 #include "sanitizer_common/sanitizer_common_syscalls.inc"
 
+struct ThreadStartParam {
+  atomic_uintptr_t t;
+  atomic_uintptr_t is_registered;
+};
+
 static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+#if SANITIZER_WINDOWS
+  // FIXME: this is a bandaid fix for PR22025.
   AsanThread *t = (AsanThread*)arg;
   SetCurrentThread(t);
-  return t->ThreadStart(GetTid());
+  return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+#else
+  ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+  AsanThread *t = nullptr;
+  while ((t = reinterpret_cast<AsanThread *>(
+              atomic_load(&param->t, memory_order_acquire))) == 0)
+    internal_sched_yield();
+  SetCurrentThread(t);
+  return t->ThreadStart(GetTid(), &param->is_registered);
+#endif
 }
 
 #if ASAN_INTERCEPT_PTHREAD_CREATE
 INTERCEPTOR(int, pthread_create, void *thread,
     void *attr, void *(*start_routine)(void*), void *arg) {
   EnsureMainThreadIDIsCorrect();
-  // Strict init-order checking in thread-hostile.
+  // Strict init-order checking is thread-hostile.
   if (flags()->strict_init_order)
     StopInitOrderChecking();
   GET_STACK_TRACE_THREAD;
   int detached = 0;
   if (attr != 0)
     REAL(pthread_attr_getdetachstate)(attr, &detached);
-
-  u32 current_tid = GetCurrentTidOrInvalid();
-  AsanThread *t = AsanThread::Create(start_routine, arg);
-  CreateThreadContextArgs args = { t, &stack };
-  asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
-  return REAL(pthread_create)(thread, attr, asan_thread_start, t);
+  ThreadStartParam param;
+  atomic_store(&param.t, 0, memory_order_relaxed);
+  atomic_store(&param.is_registered, 0, memory_order_relaxed);
+  int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+  if (result == 0) {
+    u32 current_tid = GetCurrentTidOrInvalid();
+    AsanThread *t =
+        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+    // Wait until the AsanThread object is initialized and the ThreadRegistry
+    // entry is in "started" state. One reason for this is that after this
+    // interceptor exits, the child thread's stack may be the only thing holding
+    // the |arg| pointer. This may cause LSan to report a leak if leak checking
+    // happens at a point when the interceptor has already exited, but the stack
+    // range for the child thread is not yet known.
+    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+      internal_sched_yield();
+  }
+  return result;
 }
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+  return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
 #endif  // ASAN_INTERCEPT_PTHREAD_CREATE
 
 #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
 
 #if SANITIZER_ANDROID
 INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
-  if (!AsanInterceptsSignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
     return REAL(bsd_signal)(signum, handler);
   }
   return 0;
 }
 #else
 INTERCEPTOR(void*, signal, int signum, void *handler) {
-  if (!AsanInterceptsSignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
     return REAL(signal)(signum, handler);
   }
   return 0;
@@ -213,8 +281,7 @@
 
 INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
                             struct sigaction *oldact) {
-  if (!AsanInterceptsSignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
     return REAL(sigaction)(signum, act, oldact);
   }
   return 0;
@@ -325,14 +392,16 @@
 }
 
 INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, memcmp);
   if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
   ENSURE_ASAN_INITED();
   if (flags()->replace_intrin) {
     if (flags()->strict_memcmp) {
       // Check the entire regions even if the first bytes of the buffers are
       // different.
-      ASAN_READ_RANGE(a1, size);
-      ASAN_READ_RANGE(a2, size);
+      ASAN_READ_RANGE(ctx, a1, size);
+      ASAN_READ_RANGE(ctx, a2, size);
       // Fallthrough to REAL(memcmp) below.
     } else {
       unsigned char c1 = 0, c2 = 0;
@@ -344,65 +413,81 @@
         c2 = s2[i];
         if (c1 != c2) break;
       }
-      ASAN_READ_RANGE(s1, Min(i + 1, size));
-      ASAN_READ_RANGE(s2, Min(i + 1, size));
+      ASAN_READ_RANGE(ctx, s1, Min(i + 1, size));
+      ASAN_READ_RANGE(ctx, s2, Min(i + 1, size));
       return CharCmp(c1, c2);
     }
   }
   return REAL(memcmp(a1, a2, size));
 }
 
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do {                             \
+    if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);        \
+    if (asan_init_is_running) {                                                \
+      return REAL(memcpy)(to, from, size);                                     \
+    }                                                                          \
+    ENSURE_ASAN_INITED();                                                      \
+    if (flags()->replace_intrin) {                                             \
+      if (to != from) {                                                        \
+        CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);                  \
+      }                                                                        \
+      ASAN_READ_RANGE(ctx, from, size);                                        \
+      ASAN_WRITE_RANGE(ctx, to, size);                                         \
+    }                                                                          \
+    return REAL(memcpy)(to, from, size);                                       \
+  } while (0)
+
+
 void *__asan_memcpy(void *to, const void *from, uptr size) {
-  if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
-  // memcpy is called during __asan_init() from the internals
-  // of printf(...).
-  if (asan_init_is_running) {
-    return REAL(memcpy)(to, from, size);
-  }
-  ENSURE_ASAN_INITED();
-  if (flags()->replace_intrin) {
-    if (to != from) {
-      // We do not treat memcpy with to==from as a bug.
-      // See http://llvm.org/bugs/show_bug.cgi?id=11763.
-      CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
-    }
-    ASAN_READ_RANGE(from, size);
-    ASAN_WRITE_RANGE(to, size);
-  }
-  return REAL(memcpy)(to, from, size);
+  ASAN_MEMCPY_IMPL(nullptr, to, from, size);
 }
 
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) do {                             \
+    if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);        \
+    if (asan_init_is_running) {                                                \
+      return REAL(memset)(block, c, size);                                     \
+    }                                                                          \
+    ENSURE_ASAN_INITED();                                                      \
+    if (flags()->replace_intrin) {                                             \
+      ASAN_WRITE_RANGE(ctx, block, size);                                      \
+    }                                                                          \
+    return REAL(memset)(block, c, size);                                       \
+  } while (0)
+
 void *__asan_memset(void *block, int c, uptr size) {
-  if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
-  // memset is called inside Printf.
-  if (asan_init_is_running) {
-    return REAL(memset)(block, c, size);
-  }
-  ENSURE_ASAN_INITED();
-  if (flags()->replace_intrin) {
-    ASAN_WRITE_RANGE(block, size);
-  }
-  return REAL(memset)(block, c, size);
+  ASAN_MEMSET_IMPL(nullptr, block, c, size);
 }
 
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do {                            \
+    if (UNLIKELY(!asan_inited))                                                \
+      return internal_memmove(to, from, size);                                 \
+    ENSURE_ASAN_INITED();                                                      \
+    if (flags()->replace_intrin) {                                             \
+      ASAN_READ_RANGE(ctx, from, size);                                        \
+      ASAN_WRITE_RANGE(ctx, to, size);                                         \
+    }                                                                          \
+    return internal_memmove(to, from, size);                                   \
+  } while (0)
+
 void *__asan_memmove(void *to, const void *from, uptr size) {
-  if (UNLIKELY(!asan_inited))
-    return internal_memmove(to, from, size);
-  ENSURE_ASAN_INITED();
-  if (flags()->replace_intrin) {
-    ASAN_READ_RANGE(from, size);
-    ASAN_WRITE_RANGE(to, size);
-  }
-  return internal_memmove(to, from, size);
+  ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
 }
 
 INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
-  return __asan_memmove(to, from, size);
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, memmove);
+  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
 }
 
 INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
 #if !SANITIZER_MAC
-  return __asan_memcpy(to, from, size);
+  ASAN_MEMCPY_IMPL(ctx, to, from, size);
 #else
   // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
   // with WRAP(memcpy). As a result, false positives are reported for memmove()
@@ -410,15 +495,19 @@
   // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
   // internal_memcpy(), which may lead to crashes, see
   // http://llvm.org/bugs/show_bug.cgi?id=16362.
-  return __asan_memmove(to, from, size);
+  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
 #endif  // !SANITIZER_MAC
 }
 
 INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
-  return __asan_memset(block, c, size);
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, memset);
+  ASAN_MEMSET_IMPL(ctx, block, c, size);
 }
 
 INTERCEPTOR(char*, strchr, const char *str, int c) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strchr);
   if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
   // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
   // used.
@@ -429,7 +518,7 @@
   char *result = REAL(strchr)(str, c);
   if (flags()->replace_str) {
     uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
-    ASAN_READ_RANGE(str, bytes_read);
+    ASAN_READ_RANGE(ctx, str, bytes_read);
   }
   return result;
 }
@@ -451,13 +540,15 @@
 // For both strcat() and strncat() we need to check the validity of |to|
 // argument irrespective of the |from| length.
 INTERCEPTOR(char*, strcat, char *to, const char *from) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strcat);  // NOLINT
   ENSURE_ASAN_INITED();
   if (flags()->replace_str) {
     uptr from_length = REAL(strlen)(from);
-    ASAN_READ_RANGE(from, from_length + 1);
+    ASAN_READ_RANGE(ctx, from, from_length + 1);
     uptr to_length = REAL(strlen)(to);
-    ASAN_READ_RANGE(to, to_length);
-    ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+    ASAN_READ_RANGE(ctx, to, to_length);
+    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
     // If the copying actually happens, the |from| string should not overlap
     // with the resulting string starting at |to|, which has a length of
     // to_length + from_length + 1.
@@ -470,14 +561,16 @@
 }
 
 INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strncat);
   ENSURE_ASAN_INITED();
   if (flags()->replace_str) {
     uptr from_length = MaybeRealStrnlen(from, size);
     uptr copy_length = Min(size, from_length + 1);
-    ASAN_READ_RANGE(from, copy_length);
+    ASAN_READ_RANGE(ctx, from, copy_length);
     uptr to_length = REAL(strlen)(to);
-    ASAN_READ_RANGE(to, to_length);
-    ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+    ASAN_READ_RANGE(ctx, to, to_length);
+    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
     if (from_length > 0) {
       CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
                            from, copy_length);
@@ -487,6 +580,8 @@
 }
 
 INTERCEPTOR(char*, strcpy, char *to, const char *from) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strcpy);  // NOLINT
 #if SANITIZER_MAC
   if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from);  // NOLINT
 #endif
@@ -499,19 +594,21 @@
   if (flags()->replace_str) {
     uptr from_size = REAL(strlen)(from) + 1;
     CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
-    ASAN_READ_RANGE(from, from_size);
-    ASAN_WRITE_RANGE(to, from_size);
+    ASAN_READ_RANGE(ctx, from, from_size);
+    ASAN_WRITE_RANGE(ctx, to, from_size);
   }
   return REAL(strcpy)(to, from);  // NOLINT
 }
 
 #if ASAN_INTERCEPT_STRDUP
 INTERCEPTOR(char*, strdup, const char *s) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
   if (UNLIKELY(!asan_inited)) return internal_strdup(s);
   ENSURE_ASAN_INITED();
   uptr length = REAL(strlen)(s);
   if (flags()->replace_str) {
-    ASAN_READ_RANGE(s, length + 1);
+    ASAN_READ_RANGE(ctx, s, length + 1);
   }
   GET_STACK_TRACE_MALLOC;
   void *new_mem = asan_malloc(length + 1, &stack);
@@ -521,6 +618,8 @@
 #endif
 
 INTERCEPTOR(SIZE_T, strlen, const char *s) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strlen);
   if (UNLIKELY(!asan_inited)) return internal_strlen(s);
   // strlen is called from malloc_default_purgeable_zone()
   // in __asan::ReplaceSystemAlloc() on Mac.
@@ -530,37 +629,43 @@
   ENSURE_ASAN_INITED();
   SIZE_T length = REAL(strlen)(s);
   if (flags()->replace_str) {
-    ASAN_READ_RANGE(s, length + 1);
+    ASAN_READ_RANGE(ctx, s, length + 1);
   }
   return length;
 }
 
 INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
   SIZE_T length = REAL(wcslen)(s);
   if (!asan_init_is_running) {
     ENSURE_ASAN_INITED();
-    ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
+    ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
   }
   return length;
 }
 
 INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
   ENSURE_ASAN_INITED();
   if (flags()->replace_str) {
     uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
     CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
-    ASAN_READ_RANGE(from, from_size);
-    ASAN_WRITE_RANGE(to, size);
+    ASAN_READ_RANGE(ctx, from, from_size);
+    ASAN_WRITE_RANGE(ctx, to, size);
   }
   return REAL(strncpy)(to, from, size);
 }
 
 #if ASAN_INTERCEPT_STRNLEN
 INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
   ENSURE_ASAN_INITED();
   uptr length = REAL(strnlen)(s, maxlen);
   if (flags()->replace_str) {
-    ASAN_READ_RANGE(s, Min(length + 1, maxlen));
+    ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
   }
   return length;
 }
@@ -585,6 +690,8 @@
 
 INTERCEPTOR(long, strtol, const char *nptr,  // NOLINT
             char **endptr, int base) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strtol);
   ENSURE_ASAN_INITED();
   if (!flags()->replace_str) {
     return REAL(strtol)(nptr, endptr, base);
@@ -596,12 +703,14 @@
   }
   if (IsValidStrtolBase(base)) {
     FixRealStrtolEndptr(nptr, &real_endptr);
-    ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+    ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
   }
   return result;
 }
 
 INTERCEPTOR(int, atoi, const char *nptr) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atoi);
 #if SANITIZER_MAC
   if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
 #endif
@@ -616,11 +725,13 @@
   // different from int). So, we just imitate this behavior.
   int result = REAL(strtol)(nptr, &real_endptr, 10);
   FixRealStrtolEndptr(nptr, &real_endptr);
-  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
   return result;
 }
 
 INTERCEPTOR(long, atol, const char *nptr) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atol);
 #if SANITIZER_MAC
   if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
 #endif
@@ -631,13 +742,15 @@
   char *real_endptr;
   long result = REAL(strtol)(nptr, &real_endptr, 10);  // NOLINT
   FixRealStrtolEndptr(nptr, &real_endptr);
-  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
   return result;
 }
 
 #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
 INTERCEPTOR(long long, strtoll, const char *nptr,  // NOLINT
             char **endptr, int base) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
   ENSURE_ASAN_INITED();
   if (!flags()->replace_str) {
     return REAL(strtoll)(nptr, endptr, base);
@@ -652,12 +765,14 @@
   // if base is valid.
   if (IsValidStrtolBase(base)) {
     FixRealStrtolEndptr(nptr, &real_endptr);
-    ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+    ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
   }
   return result;
 }
 
 INTERCEPTOR(long long, atoll, const char *nptr) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atoll);
   ENSURE_ASAN_INITED();
   if (!flags()->replace_str) {
     return REAL(atoll)(nptr);
@@ -665,7 +780,7 @@
   char *real_endptr;
   long long result = REAL(strtoll)(nptr, &real_endptr, 10);  // NOLINT
   FixRealStrtolEndptr(nptr, &real_endptr);
-  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
   return result;
 }
 #endif  // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
@@ -703,15 +818,16 @@
                    void* security, uptr stack_size,
                    DWORD (__stdcall *start_routine)(void*), void* arg,
                    DWORD thr_flags, void* tid) {
-  // Strict init-order checking in thread-hostile.
+  // Strict init-order checking is thread-hostile.
   if (flags()->strict_init_order)
     StopInitOrderChecking();
   GET_STACK_TRACE_THREAD;
-  u32 current_tid = GetCurrentTidOrInvalid();
-  AsanThread *t = AsanThread::Create(start_routine, arg);
-  CreateThreadContextArgs args = { t, &stack };
+  // FIXME: The CreateThread interceptor is not the same as a pthread_create
+  // one.  This is a bandaid fix for PR22025.
   bool detached = false;  // FIXME: how can we determine it on Windows?
-  asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
+  u32 current_tid = GetCurrentTidOrInvalid();
+  AsanThread *t =
+        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
   return REAL(CreateThread)(security, stack_size,
                             asan_thread_start, t, thr_flags, tid);
 }
@@ -797,6 +913,7 @@
   // Intercept threading-related functions
 #if ASAN_INTERCEPT_PTHREAD_CREATE
   ASAN_INTERCEPT_FUNC(pthread_create);
+  ASAN_INTERCEPT_FUNC(pthread_join);
 #endif
 
   // Intercept atexit function.
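The pthread_create interceptor above replaces the old create-then-register order with a two-way handshake: the parent publishes the AsanThread object only after REAL(pthread_create) succeeds, then waits for the child to report that registration finished, so the child's stack range is known before leak checking can run. A standalone sketch of the same handshake with standard atomics (names are illustrative, this is not runtime code):

#include <atomic>
#include <cstdio>
#include <pthread.h>
#include <sched.h>

// Illustrative stand-ins for AsanThread and ThreadStartParam.
struct StartParam {
  std::atomic<void *> desc{nullptr};    // published by the parent (param.t)
  std::atomic<bool> registered{false};  // signalled by the child (is_registered)
};

static void *ChildMain(void *arg) {
  StartParam *p = static_cast<StartParam *>(arg);
  void *desc = nullptr;
  // Spin until the parent publishes the thread descriptor, mirroring
  // asan_thread_start().
  while ((desc = p->desc.load(std::memory_order_acquire)) == nullptr)
    sched_yield();
  // ... initialize per-thread state from |desc| here ...
  p->registered.store(true, std::memory_order_release);
  std::printf("child received descriptor %p\n", desc);
  return nullptr;
}

int main() {
  StartParam param;
  pthread_t tid;
  if (pthread_create(&tid, nullptr, ChildMain, &param) != 0) return 1;
  static int descriptor;  // stands in for the AsanThread object
  param.desc.store(&descriptor, std::memory_order_release);
  // Wait until the child has registered, mirroring the interceptor's loop.
  while (!param.registered.load(std::memory_order_acquire))
    sched_yield();
  return pthread_join(tid, nullptr);
}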
diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h
index edaf44d..ea7540f 100644
--- a/lib/asan/asan_interface_internal.h
+++ b/lib/asan/asan_interface_internal.h
@@ -9,8 +9,11 @@
 //
 // This file is a part of AddressSanitizer, an address sanity checker.
 //
-// This header can be included by the instrumented program to fetch
-// data (mostly allocator statistics) from ASan runtime library.
+// This header declares the AddressSanitizer runtime interface functions.
+// The runtime library has to define these functions so that the instrumented
+// program can call them.
+//
+// See also include/sanitizer/asan_interface.h
 //===----------------------------------------------------------------------===//
 #ifndef ASAN_INTERFACE_INTERNAL_H
 #define ASAN_INTERFACE_INTERNAL_H
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index f9f9243..ffd3ff8 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -62,6 +62,21 @@
 class AsanThread;
 using __sanitizer::StackTrace;
 
+struct SignalContext {
+  void *context;
+  uptr addr;
+  uptr pc;
+  uptr sp;
+  uptr bp;
+
+  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
+      context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
+  }
+
+  // Creates signal context in a platform-specific manner.
+  static SignalContext Create(void *siginfo, void *context);
+};
+
 void AsanInitFromRtl();
 
 // asan_rtl.cc
@@ -78,8 +93,8 @@
 void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
 void AsanOnSIGSEGV(int, void *siginfo, void *context);
 
+void DisableReexec();
 void MaybeReexec();
-bool AsanInterceptsSignal(int signum);
 void ReadContextStack(void *context, uptr *stack, uptr *ssize);
 void AsanPlatformThreadInit();
 void StopInitOrderChecking();
@@ -92,10 +107,10 @@
 
 void AppendToErrorMessageBuffer(const char *buffer);
 
-void ParseExtraActivationFlags();
-
 void *AsanDlSymNext(const char *sym);
 
+void ReserveShadowMemoryRange(uptr beg, uptr end);
+
 // Platform-specific options.
 #if SANITIZER_MAC
 bool PlatformHasDifferentMemcpyAndMemmove();
@@ -136,6 +151,8 @@
 const int kAsanInternalHeapMagic = 0xfe;
 const int kAsanArrayCookieMagic = 0xac;
 const int kAsanIntraObjectRedzone = 0xbb;
+const int kAsanAllocaLeftMagic = 0xca;
+const int kAsanAllocaRightMagic = 0xcb;
 
 static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
 static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index fdd009c..8e8bafd 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -68,6 +68,10 @@
 
 namespace __asan {
 
+void DisableReexec() {
+  // No need to re-exec on Linux.
+}
+
 void MaybeReexec() {
   // No need to re-exec on Linux.
 }
@@ -220,10 +224,6 @@
 #endif
 }
 
-bool AsanInterceptsSignal(int signum) {
-  return signum == SIGSEGV && common_flags()->handle_segv;
-}
-
 void AsanPlatformThreadInit() {
   // Nothing here for now.
 }
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index 4014357..b353686 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -101,8 +101,15 @@
   }
 }
 
+static bool reexec_disabled = false;
+
+void DisableReexec() {
+  reexec_disabled = true;
+}
+
 void MaybeReexec() {
-  if (!flags()->allow_reexec) return;
+  if (reexec_disabled) return;
+
   // Make sure the dynamic ASan runtime library is preloaded so that the
   // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
   // ourselves.
@@ -113,8 +120,10 @@
   uptr old_env_len = dyld_insert_libraries ?
       internal_strlen(dyld_insert_libraries) : 0;
   uptr fname_len = internal_strlen(info.dli_fname);
+  const char *dylib_name = StripModuleName(info.dli_fname);
+  uptr dylib_name_len = internal_strlen(dylib_name);
   if (!dyld_insert_libraries ||
-      !REAL(strstr)(dyld_insert_libraries, StripModuleName(info.dli_fname))) {
+      !REAL(strstr)(dyld_insert_libraries, dylib_name)) {
     // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
     // library.
     char program_name[1024];
@@ -140,58 +149,74 @@
     VReport(1, "exec()-ing the program with\n");
     VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
     VReport(1, "to enable ASan wrappers.\n");
-    VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
     execv(program_name, *_NSGetArgv());
-  } else {
-    // DYLD_INSERT_LIBRARIES is set and contains the runtime library.
-    if (old_env_len == fname_len) {
-      // It's just the runtime library name - fine to unset the variable.
-      LeakyResetEnv(kDyldInsertLibraries, NULL);
-    } else {
-      uptr env_name_len = internal_strlen(kDyldInsertLibraries);
-      // Allocate memory to hold the previous env var name, its value, the '='
-      // sign and the '\0' char.
-      char *new_env = (char*)allocator_for_env.Allocate(
-          old_env_len + 2 + env_name_len);
-      CHECK(new_env);
-      internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
-      internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
-      new_env[env_name_len] = '=';
-      char *new_env_pos = new_env + env_name_len + 1;
 
-      // Iterate over colon-separated pieces of |dyld_insert_libraries|.
-      char *piece_start = dyld_insert_libraries;
-      char *piece_end = NULL;
-      char *old_env_end = dyld_insert_libraries + old_env_len;
-      do {
-        if (piece_start[0] == ':') piece_start++;
-        piece_end =  REAL(strchr)(piece_start, ':');
-        if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
-        if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
-        uptr piece_len = piece_end - piece_start;
-
-        // If the current piece isn't the runtime library name,
-        // append it to new_env.
-        if ((piece_len != fname_len) ||
-            (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
-          if (new_env_pos != new_env + env_name_len + 1) {
-            new_env_pos[0] = ':';
-            new_env_pos++;
-          }
-          internal_strncpy(new_env_pos, piece_start, piece_len);
-        }
-        // Move on to the next piece.
-        new_env_pos += piece_len;
-        piece_start = piece_end;
-      } while (piece_start < old_env_end);
-
-      // Can't use setenv() here, because it requires the allocator to be
-      // initialized.
-      // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
-      // a separate function called after InitializeAllocator().
-      LeakyResetEnv(kDyldInsertLibraries, new_env);
-    }
+    // We get here only if execv() failed.
+    Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+           "which is required for ASan to work. ASan tried to set the "
+           "environment variable and re-execute itself, but execv() failed, "
+           "possibly because of sandbox restrictions. Make sure to launch the "
+           "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+    CHECK("execv failed" && 0);
   }
+
+  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+  // the dylib from the environment variable, because interceptors are installed
+  // and we don't want our children to inherit the variable.
+
+  uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+  // Allocate memory to hold the previous env var name, its value, the '='
+  // sign and the '\0' char.
+  char *new_env = (char*)allocator_for_env.Allocate(
+      old_env_len + 2 + env_name_len);
+  CHECK(new_env);
+  internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+  new_env[env_name_len] = '=';
+  char *new_env_pos = new_env + env_name_len + 1;
+
+  // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+  char *piece_start = dyld_insert_libraries;
+  char *piece_end = NULL;
+  char *old_env_end = dyld_insert_libraries + old_env_len;
+  do {
+    if (piece_start[0] == ':') piece_start++;
+    piece_end = REAL(strchr)(piece_start, ':');
+    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+    uptr piece_len = piece_end - piece_start;
+
+    char *filename_start =
+        (char *)internal_memrchr(piece_start, '/', piece_len);
+    uptr filename_len = piece_len;
+    if (filename_start) {
+      filename_start += 1;
+      filename_len = piece_len - (filename_start - piece_start);
+    } else {
+      filename_start = piece_start;
+    }
+
+    // If the current piece isn't the runtime library name,
+    // append it to new_env.
+    if ((dylib_name_len != filename_len) ||
+        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+      if (new_env_pos != new_env + env_name_len + 1) {
+        new_env_pos[0] = ':';
+        new_env_pos++;
+      }
+      internal_strncpy(new_env_pos, piece_start, piece_len);
+      new_env_pos += piece_len;
+    }
+    // Move on to the next piece.
+    piece_start = piece_end;
+  } while (piece_start < old_env_end);
+
+  // Can't use setenv() here, because it requires the allocator to be
+  // initialized.
+  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+  // a separate function called after InitializeAllocator().
+  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+  LeakyResetEnv(kDyldInsertLibraries, new_env);
 }
 
 // No-op. Mac does not support static linkage anyway.
@@ -205,11 +230,6 @@
 // No-op. Mac does not support static linkage anyway.
 void AsanCheckIncompatibleRT() {}
 
-bool AsanInterceptsSignal(int signum) {
-  return (signum == SIGSEGV || signum == SIGBUS) &&
-         common_flags()->handle_segv;
-}
-
 void AsanPlatformThreadInit() {
 }
 
@@ -264,9 +284,8 @@
 void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
   AsanThread *t = GetCurrentThread();
   if (!t) {
-    t = AsanThread::Create(0, 0);
-    CreateThreadContextArgs args = { t, stack };
-    asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
+    t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
+                           parent_tid, stack, /* detached */ true);
     t->Init();
     asanThreadRegistry().StartThread(t->tid(), 0, 0);
     SetCurrentThread(t);
@@ -313,7 +332,7 @@
                                   dispatch_function_t func) {                 \
     GET_STACK_TRACE_THREAD;                                                   \
     asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
-    if (common_flags()->verbosity >= 2) {                                     \
+    if (Verbosity() >= 2) {                                     \
       Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n",             \
              asan_ctxt, pthread_self());                                      \
       PRINT_CURRENT_STACK();                                                  \
@@ -331,7 +350,7 @@
                                     dispatch_function_t func) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
-  if (common_flags()->verbosity >= 2) {
+  if (Verbosity() >= 2) {
     Report("dispatch_after_f: %p\n", asan_ctxt);
     PRINT_CURRENT_STACK();
   }
@@ -344,7 +363,7 @@
                                           dispatch_function_t func) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
-  if (common_flags()->verbosity >= 2) {
+  if (Verbosity() >= 2) {
     Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
            asan_ctxt, pthread_self());
     PRINT_CURRENT_STACK();
@@ -374,13 +393,6 @@
     work(); \
   }
 
-// Forces the compiler to generate a frame pointer in the function.
-#define ENABLE_FRAME_POINTER                                       \
-  do {                                                             \
-    volatile uptr enable_fp;                                       \
-    enable_fp = GET_CURRENT_FRAME();                               \
-  } while (0)
-
 INTERCEPTOR(void, dispatch_async,
             dispatch_queue_t dq, void(^work)(void)) {
   ENABLE_FRAME_POINTER;
@@ -404,6 +416,10 @@
 
 INTERCEPTOR(void, dispatch_source_set_cancel_handler,
             dispatch_source_t ds, void(^work)(void)) {
+  if (!work) {
+    REAL(dispatch_source_set_cancel_handler)(ds, work);
+    return;
+  }
   ENABLE_FRAME_POINTER;
   GET_ASAN_BLOCK(work);
   REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
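The rewritten MaybeReexec() earlier in this file now compares only the basename of each colon-separated DYLD_INSERT_LIBRARIES entry against the runtime dylib name, and resets the variable to NULL when nothing else remains. A standalone sketch of that filtering step, using std::string instead of the runtime's allocator-free code (library names below are illustrative):

#include <iostream>
#include <string>

// Drop any colon-separated entry whose basename matches |dylib_basename|;
// keep every other entry in its original order.
static std::string FilterInsertLibraries(const std::string &env,
                                         const std::string &dylib_basename) {
  std::string result;
  size_t pos = 0;
  while (pos <= env.size()) {
    size_t colon = env.find(':', pos);
    if (colon == std::string::npos) colon = env.size();
    std::string piece = env.substr(pos, colon - pos);
    size_t slash = piece.rfind('/');
    std::string base =
        (slash == std::string::npos) ? piece : piece.substr(slash + 1);
    if (!piece.empty() && base != dylib_basename) {
      if (!result.empty()) result += ':';
      result += piece;
    }
    pos = colon + 1;
  }
  return result;
}

int main() {
  // Prints "/usr/lib/libfoo.dylib": the ASan dylib entry is filtered out.
  std::cout << FilterInsertLibraries(
                   "/usr/lib/libfoo.dylib:"
                   "/tmp/libclang_rt.asan_osx_dynamic.dylib",
                   "libclang_rt.asan_osx_dynamic.dylib")
            << "\n";
  return 0;
}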
diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc
index 2ef4c77..d7a6307 100644
--- a/lib/asan/asan_malloc_mac.cc
+++ b/lib/asan/asan_malloc_mac.cc
@@ -90,9 +90,9 @@
   ENSURE_ASAN_INITED();
   // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
   size_t buflen = 6 + (name ? internal_strlen(name) : 0);
-  InternalScopedBuffer<char> new_name(buflen);
+  InternalScopedString new_name(buflen);
   if (name && zone->introspect == asan_zone.introspect) {
-    internal_snprintf(new_name.data(), buflen, "asan-%s", name);
+    new_name.append("asan-%s", name);
     name = new_name.data();
   }
 
@@ -152,13 +152,17 @@
 
 namespace {
 
-// TODO(glider): the mz_* functions should be united with the Linux wrappers,
-// as they are basically copied from there.
-size_t mz_size(malloc_zone_t* zone, const void* ptr) {
+// TODO(glider): the __asan_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
   return asan_mz_size(ptr);
 }
 
-void *mz_malloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
   if (UNLIKELY(!asan_inited)) {
     CHECK(system_malloc_zone);
     return malloc_zone_malloc(system_malloc_zone, size);
@@ -167,7 +171,9 @@
   return asan_malloc(size, &stack);
 }
 
-void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
   if (UNLIKELY(!asan_inited)) {
     // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
     const size_t kCallocPoolSize = 1024;
@@ -183,7 +189,9 @@
   return asan_calloc(nmemb, size, &stack);
 }
 
-void *mz_valloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
   if (UNLIKELY(!asan_inited)) {
     CHECK(system_malloc_zone);
     return malloc_zone_valloc(system_malloc_zone, size);
@@ -210,11 +218,15 @@
 }
 
 // TODO(glider): the allocation callbacks need to be refactored.
-void mz_free(malloc_zone_t *zone, void *ptr) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
   free_common(zone, ptr);
 }
 
-void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
   if (!ptr) {
     GET_STACK_TRACE_MALLOC;
     return asan_malloc(size, &stack);
@@ -233,15 +245,16 @@
   }
 }
 
-void mz_destroy(malloc_zone_t* zone) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_destroy(malloc_zone_t* zone) {
   // A no-op -- we will not be destroyed!
-  Report("mz_destroy() called -- ignoring\n");
+  Report("__asan_mz_destroy() called -- ignoring\n");
 }
 
-  // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
-    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
   if (UNLIKELY(!asan_inited)) {
     CHECK(system_malloc_zone);
     return malloc_zone_memalign(system_malloc_zone, align, size);
@@ -252,12 +265,12 @@
 
 // This function is currently unused, and we build with -Werror.
 #if 0
-void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
+void __asan_mz_free_definite_size(
+    malloc_zone_t* zone, void *ptr, size_t size) {
   // TODO(glider): check that |size| is valid.
   UNIMPLEMENTED();
 }
 #endif
-#endif
 
 kern_return_t mi_enumerator(task_t task, void *,
                             unsigned type_mask, vm_address_t zone_address,
@@ -299,13 +312,10 @@
   internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
 }
 
-#if defined(MAC_OS_X_VERSION_10_6) && \
-    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
 boolean_t mi_zone_locked(malloc_zone_t *zone) {
   // UNIMPLEMENTED();
   return false;
 }
-#endif
 
 }  // unnamed namespace
 
@@ -324,32 +334,25 @@
   asan_introspection.force_lock = &mi_force_lock;
   asan_introspection.force_unlock = &mi_force_unlock;
   asan_introspection.statistics = &mi_statistics;
+  asan_introspection.zone_locked = &mi_zone_locked;
 
   internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
 
-  // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
-  asan_zone.version = 4;
+  // Use version 6 for OSX >= 10.6.
+  asan_zone.version = 6;
   asan_zone.zone_name = "asan";
-  asan_zone.size = &mz_size;
-  asan_zone.malloc = &mz_malloc;
-  asan_zone.calloc = &mz_calloc;
-  asan_zone.valloc = &mz_valloc;
-  asan_zone.free = &mz_free;
-  asan_zone.realloc = &mz_realloc;
-  asan_zone.destroy = &mz_destroy;
+  asan_zone.size = &__asan_mz_size;
+  asan_zone.malloc = &__asan_mz_malloc;
+  asan_zone.calloc = &__asan_mz_calloc;
+  asan_zone.valloc = &__asan_mz_valloc;
+  asan_zone.free = &__asan_mz_free;
+  asan_zone.realloc = &__asan_mz_realloc;
+  asan_zone.destroy = &__asan_mz_destroy;
   asan_zone.batch_malloc = 0;
   asan_zone.batch_free = 0;
-  asan_zone.introspect = &asan_introspection;
-
-  // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
-    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-  // Switch to version 6 on OSX 10.6 to support memalign.
-  asan_zone.version = 6;
   asan_zone.free_definite_size = 0;
-  asan_zone.memalign = &mz_memalign;
-  asan_introspection.zone_locked = &mi_zone_locked;
-#endif
+  asan_zone.memalign = &__asan_mz_memalign;
+  asan_zone.introspect = &asan_introspection;
 
   // Register the ASan zone.
   malloc_zone_register(&asan_zone);
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 2746754..5cb011d 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -59,13 +59,20 @@
 // || `[0x20000000, 0x23ffffff]` || LowShadow  ||
 // || `[0x00000000, 0x1fffffff]` || LowMem     ||
 //
-// Default Linux/MIPS mapping:
+// Default Linux/MIPS32 mapping:
 // || `[0x2aaa0000, 0xffffffff]` || HighMem    ||
 // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
 // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap  ||
 // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow  ||
 // || `[0x00000000, 0x0aa9ffff]` || LowMem     ||
 //
+// Default Linux/MIPS64 mapping:
+// || `[0x4000000000, 0xffffffffff]` || HighMem    ||
+// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
+// || `[0x2400000000, 0x27ffffffff]` || ShadowGap  ||
+// || `[0x2000000000, 0x23ffffffff]` || LowShadow  ||
+// || `[0x0000000000, 0x1fffffffff]` || LowMem     ||
+//
 // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
 // || `[0x500000000000, 0x7fffffffffff]` || HighMem    ||
 // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -79,6 +86,15 @@
 // || `[0x48000000, 0x4bffffff]` || ShadowGap  ||
 // || `[0x40000000, 0x47ffffff]` || LowShadow  ||
 // || `[0x00000000, 0x3fffffff]` || LowMem     ||
+//
+// Default Windows/i386 mapping:
+// (the exact location of HighShadow/HighMem may vary depending
+//  on WoW64, /LARGEADDRESSAWARE, etc).
+// || `[0x50000000, 0xffffffff]` || HighMem    ||
+// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
+// || `[0x36000000, 0x39ffffff]` || ShadowGap  ||
+// || `[0x30000000, 0x35ffffff]` || LowShadow  ||
+// || `[0x00000000, 0x2fffffff]` || LowMem     ||
 
 static const u64 kDefaultShadowScale = 3;
 static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
@@ -87,10 +103,11 @@
 static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
 static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
+static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000
 
 #define SHADOW_SCALE kDefaultShadowScale
 #if SANITIZER_ANDROID
@@ -101,12 +118,12 @@
 #    define SHADOW_OFFSET kMIPS32_ShadowOffset32
 #  elif SANITIZER_FREEBSD
 #    define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+#  elif SANITIZER_IOS
+#    define SHADOW_OFFSET kIosShadowOffset32
+#  elif SANITIZER_WINDOWS
+#    define SHADOW_OFFSET kWindowsShadowOffset32
 #  else
-#    if SANITIZER_IOS
-#      define SHADOW_OFFSET kIosShadowOffset32
-#    else
-#      define SHADOW_OFFSET kDefaultShadowOffset32
-#    endif
+#    define SHADOW_OFFSET kDefaultShadowOffset32
 #  endif
 # else
 #  if defined(__aarch64__)
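
The tables above describe the address-to-shadow layouts; the translation itself is a shift plus an offset. A minimal standalone sketch (not part of the patch; names are local to the example) for the default 32-bit mapping with SHADOW_SCALE == 3 and SHADOW_OFFSET == 0x20000000:

    #include <cstdint>
    #include <cstdio>

    // 8 application bytes map to 1 shadow byte; the offset places the result
    // inside the LowShadow/HighShadow regions from the tables above.
    static const uint64_t kShadowScale = 3;
    static const uint64_t kShadowOffset32 = 1ULL << 29;  // 0x20000000

    static uint64_t MemToShadow(uint64_t addr) {
      return (addr >> kShadowScale) + kShadowOffset32;
    }

    int main() {
      uint64_t app_addr = 0x10000000ULL;  // inside LowMem
      // Prints 0x22000000, which falls inside LowShadow [0x20000000, 0x23ffffff].
      printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)app_addr,
             (unsigned long long)MemToShadow(app_addr));
      return 0;
    }
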
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 1c6e92f..e2b1f4d 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -15,13 +15,24 @@
 #include "asan_poisoning.h"
 #include "asan_report.h"
 #include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_flags.h"
 
 namespace __asan {
 
+static atomic_uint8_t can_poison_memory;
+
+void SetCanPoisonMemory(bool value) {
+  atomic_store(&can_poison_memory, value, memory_order_release);
+}
+
+bool CanPoisonMemory() {
+  return atomic_load(&can_poison_memory, memory_order_acquire);
+}
+
 void PoisonShadow(uptr addr, uptr size, u8 value) {
-  if (!flags()->poison_heap) return;
+  if (!CanPoisonMemory()) return;
   CHECK(AddrIsAlignedByGranularity(addr));
   CHECK(AddrIsInMem(addr));
   CHECK(AddrIsAlignedByGranularity(addr + size));
@@ -34,7 +45,7 @@
                                      uptr size,
                                      uptr redzone_size,
                                      u8 value) {
-  if (!flags()->poison_heap) return;
+  if (!CanPoisonMemory()) return;
   CHECK(AddrIsAlignedByGranularity(addr));
   CHECK(AddrIsInMem(addr));
   FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@@ -63,10 +74,10 @@
 
 void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
   uptr end = ptr + size;
-  if (common_flags()->verbosity) {
+  if (Verbosity()) {
     Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
            poison ? "" : "un", ptr, end, size);
-    if (common_flags()->verbosity >= 2)
+    if (Verbosity() >= 2)
       PRINT_CURRENT_STACK();
   }
   CHECK(size);
diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h
index feda1a9..3fc9464 100644
--- a/lib/asan/asan_poisoning.h
+++ b/lib/asan/asan_poisoning.h
@@ -19,6 +19,10 @@
 
 namespace __asan {
 
+// Enable/disable memory poisoning.
+void SetCanPoisonMemory(bool value);
+bool CanPoisonMemory();
+
 // Poisons the shadow memory for "size" bytes starting from "addr".
 void PoisonShadow(uptr addr, uptr size, u8 value);
 
@@ -34,7 +38,7 @@
 // performance-critical code with care.
 ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
                                     u8 value) {
-  DCHECK(flags()->poison_heap);
+  DCHECK(CanPoisonMemory());
   uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
   uptr shadow_end = MEM_TO_SHADOW(
       aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
@@ -60,15 +64,14 @@
       if (page_end != shadow_end) {
         REAL(memset)((void *)page_end, 0, shadow_end - page_end);
       }
-      void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
-      CHECK_EQ(page_beg, res);
+      ReserveShadowMemoryRange(page_beg, page_end - 1);
     }
   }
 }
 
 ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
     uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
-  DCHECK(flags()->poison_heap);
+  DCHECK(CanPoisonMemory());
   bool poison_partial = flags()->poison_partial;
   u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
   for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
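
For reference, the SetCanPoisonMemory()/CanPoisonMemory() pair introduced above is the usual release/acquire publication pattern. A standalone sketch using std::atomic (the runtime itself uses its own sanitizer_atomic primitives, not the standard library):

    #include <atomic>
    #include <cstdio>

    // Hot paths read the flag with acquire ordering; activation/deactivation
    // writes it with release ordering.
    static std::atomic<bool> can_poison_memory{false};

    static void SetCanPoisonMemory(bool value) {
      can_poison_memory.store(value, std::memory_order_release);
    }

    static bool CanPoisonMemory() {
      return can_poison_memory.load(std::memory_order_acquire);
    }

    int main() {
      SetCanPoisonMemory(true);
      printf("poisoning enabled: %d\n", CanPoisonMemory());
      return 0;
    }
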
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
index c910e23..ad31458 100644
--- a/lib/asan/asan_posix.cc
+++ b/lib/asan/asan_posix.cc
@@ -32,19 +32,24 @@
 
 namespace __asan {
 
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+  uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+  uptr pc, sp, bp;
+  GetPcSpBp(context, &pc, &sp, &bp);
+  return SignalContext(context, addr, pc, sp, bp);
+}
+
 void AsanOnSIGSEGV(int, void *siginfo, void *context) {
   ScopedDeadlySignal signal_scope(GetCurrentThread());
-  uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
   int code = (int)((siginfo_t*)siginfo)->si_code;
   // Write the first message using the bullet-proof write.
   if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
-  uptr pc, sp, bp;
-  GetPcSpBp(context, &pc, &sp, &bp);
+  SignalContext sig = SignalContext::Create(siginfo, context);
 
   // Access at a reasonable offset above SP, or slightly below it (to account
   // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
   // probably a stack overflow.
-  bool IsStackAccess = addr + 512 > sp && addr < sp + 0xFFFF;
+  bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
 
 #if __powerpc__
   // Large stack frames can be allocated with e.g.
@@ -53,8 +58,8 @@
   // If the store faults then sp will not have been updated, so the test above
   // will not work, because the fault address will be more than just "slightly"
   // below sp.
-  if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
-    u32 inst = *(unsigned *)pc;
+  if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
+    u32 inst = *(unsigned *)sig.pc;
     u32 ra = (inst >> 16) & 0x1F;
     u32 opcd = inst >> 26;
     u32 xo = (inst >> 1) & 0x3FF;
@@ -75,9 +80,9 @@
   // then hitting the guard page or unmapped memory, like, for example,
   // unaligned memory access.
   if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
-    ReportStackOverflow(pc, sp, bp, context, addr);
+    ReportStackOverflow(sig);
   else
-    ReportSIGSEGV("SEGV", pc, sp, bp, context, addr);
+    ReportSIGSEGV("SEGV", sig);
 }
 
 // ---------------------- TSD ---------------- {{{1
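
The IsStackAccess computation above is a heuristic; here it is as a standalone predicate (illustrative only, with the 512/0xFFFF constants taken from the hunk above):

    #include <cstdint>
    #include <cstdio>

    // An access slightly below SP (redzone, push of multiple registers) or
    // within ~64K above it is treated as a probable stack overflow.
    static bool IsProbableStackAccess(uint64_t addr, uint64_t sp) {
      return addr + 512 > sp && addr < sp + 0xFFFF;
    }

    int main() {
      uint64_t sp = 0x7fff0000ULL;
      printf("%d\n", IsProbableStackAccess(sp - 128, sp));      // 1: just below SP
      printf("%d\n", IsProbableStackAccess(sp + 0x20000, sp));  // 0: far above SP
      return 0;
    }
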
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 2ca11a3..ad75262 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -53,7 +53,7 @@
                      buffer, remaining);
     error_message_buffer[error_message_buffer_size - 1] = '\0';
     // FIXME: reallocate the buffer instead of truncating the message.
-    error_message_buffer_pos += remaining > length ? length : remaining;
+    error_message_buffer_pos += Min(remaining, length);
   }
 }
 
@@ -87,6 +87,8 @@
         return Cyan();
       case kAsanUserPoisonedMemoryMagic:
       case kAsanContiguousContainerOOBMagic:
+      case kAsanAllocaLeftMagic:
+      case kAsanAllocaRightMagic:
         return Blue();
       case kAsanStackUseAfterScopeMagic:
         return Magenta();
@@ -173,6 +175,8 @@
   PrintShadowByte(str, "  Intra object redzone:    ",
                   kAsanIntraObjectRedzone);
   PrintShadowByte(str, "  ASan internal:           ", kAsanInternalHeapMagic);
+  PrintShadowByte(str, "  Left alloca redzone:     ", kAsanAllocaLeftMagic);
+  PrintShadowByte(str, "  Right alloca redzone:    ", kAsanAllocaRightMagic);
 }
 
 void MaybeDumpInstructionBytes(uptr pc) {
@@ -643,38 +647,37 @@
   }
 };
 
-void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
+void ReportStackOverflow(const SignalContext &sig) {
   ScopedInErrorReport in_report;
   Decorator d;
   Printf("%s", d.Warning());
   Report(
       "ERROR: AddressSanitizer: stack-overflow on address %p"
       " (pc %p bp %p sp %p T%d)\n",
-      (void *)addr, (void *)pc, (void *)bp, (void *)sp,
+      (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
       GetCurrentTidOrInvalid());
   Printf("%s", d.EndWarning());
-  GET_STACK_TRACE_SIGNAL(pc, bp, context);
+  GET_STACK_TRACE_SIGNAL(sig);
   stack.Print();
   ReportErrorSummary("stack-overflow", &stack);
 }
 
-void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
-                   void *context, uptr addr) {
+void ReportSIGSEGV(const char *description, const SignalContext &sig) {
   ScopedInErrorReport in_report;
   Decorator d;
   Printf("%s", d.Warning());
   Report(
       "ERROR: AddressSanitizer: %s on unknown address %p"
       " (pc %p bp %p sp %p T%d)\n",
-      description, (void *)addr, (void *)pc, (void *)bp, (void *)sp,
-      GetCurrentTidOrInvalid());
-  if (pc < GetPageSizeCached()) {
+      description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
+      (void *)sig.sp, GetCurrentTidOrInvalid());
+  if (sig.pc < GetPageSizeCached()) {
     Report("Hint: pc points to the zero page.\n");
   }
   Printf("%s", d.EndWarning());
-  GET_STACK_TRACE_SIGNAL(pc, bp, context);
+  GET_STACK_TRACE_SIGNAL(sig);
   stack.Print();
-  MaybeDumpInstructionBytes(pc);
+  MaybeDumpInstructionBytes(sig.pc);
   Printf("AddressSanitizer can not provide additional info.\n");
   ReportErrorSummary("SEGV", &stack);
 }
@@ -831,6 +834,9 @@
          "      old_mid : %p\n"
          "      new_mid : %p\n",
          beg, end, old_mid, new_mid);
+  uptr granularity = SHADOW_GRANULARITY;
+  if (!IsAligned(beg, granularity))
+    Report("ERROR: beg is not aligned by %d\n", granularity);
   stack->Print();
   ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
 }
@@ -934,6 +940,8 @@
 
 void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                          uptr access_size) {
+  ENABLE_FRAME_POINTER;
+
   // Determine the error type.
   const char *bug_descr = "unknown-crash";
   if (AddrIsInMem(addr)) {
@@ -982,6 +990,10 @@
       case kAsanIntraObjectRedzone:
         bug_descr = "intra-object-overflow";
         break;
+      case kAsanAllocaLeftMagic:
+      case kAsanAllocaRightMagic:
+        bug_descr = "dynamic-stack-buffer-overflow";
+        break;
     }
   }
 
diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h
index fd65bad..029c914 100644
--- a/lib/asan/asan_report.h
+++ b/lib/asan/asan_report.h
@@ -52,10 +52,8 @@
 void DescribeThread(AsanThreadContext *context);
 
 // Different kinds of error reports.
-void NORETURN
-    ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
-void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
-                            void *context, uptr addr);
+void NORETURN ReportStackOverflow(const SignalContext &sig);
+void NORETURN ReportSIGSEGV(const char *description, const SignalContext &sig);
 void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
                                           BufferedStackTrace *free_stack);
 void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index f87d84f..9126e71 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -21,6 +21,7 @@
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
+#include "asan_suppressions.h"
 #include "asan_thread.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_flags.h"
@@ -55,8 +56,6 @@
   }
   if (common_flags()->coverage)
     __sanitizer_cov_dump();
-  if (death_callback)
-    death_callback();
   if (flags()->abort_on_error)
     Abort();
   internal__exit(flags()->exitcode);
@@ -71,265 +70,9 @@
   Die();
 }
 
-// -------------------------- Flags ------------------------- {{{1
-static const int kDefaultMallocContextSize = 30;
-
-Flags asan_flags_dont_use_directly;  // use via flags().
-
-static const char *MaybeCallAsanDefaultOptions() {
-  return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
-static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
-#ifdef ASAN_DEFAULT_OPTIONS
-// Stringize the macro value.
-# define ASAN_STRINGIZE(x) #x
-# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
-  return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
-#else
-  return "";
-#endif
-}
-
-static void ParseFlagsFromString(Flags *f, const char *str) {
-  CommonFlags *cf = common_flags();
-  ParseCommonFlagsFromString(cf, str);
-  CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
-  // Please write meaningful flag descriptions when adding new flags.
-  ParseFlag(str, &f->quarantine_size, "quarantine_size",
-            "Size (in bytes) of quarantine used to detect use-after-free "
-            "errors. Lower value may reduce memory usage but increase the "
-            "chance of false negatives.");
-  ParseFlag(str, &f->redzone, "redzone",
-            "Minimal size (in bytes) of redzones around heap objects. "
-            "Requirement: redzone >= 16, is a power of two.");
-  ParseFlag(str, &f->max_redzone, "max_redzone",
-            "Maximal size (in bytes) of redzones around heap objects.");
-  CHECK_GE(f->redzone, 16);
-  CHECK_GE(f->max_redzone, f->redzone);
-  CHECK_LE(f->max_redzone, 2048);
-  CHECK(IsPowerOfTwo(f->redzone));
-  CHECK(IsPowerOfTwo(f->max_redzone));
-
-  ParseFlag(str, &f->debug, "debug",
-      "If set, prints some debugging information and does additional checks.");
-  ParseFlag(str, &f->report_globals, "report_globals",
-      "Controls the way to handle globals (0 - don't detect buffer overflow on "
-      "globals, 1 - detect buffer overflow, 2 - print data about registered "
-      "globals).");
-
-  ParseFlag(str, &f->check_initialization_order,
-      "check_initialization_order",
-      "If set, attempts to catch initialization order issues.");
-
-  ParseFlag(str, &f->replace_str, "replace_str",
-      "If set, uses custom wrappers and replacements for libc string functions "
-      "to find more errors.");
-
-  ParseFlag(str, &f->replace_intrin, "replace_intrin",
-      "If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
-  ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
-      "Ignore invalid free() calls to work around some bugs. Used on OS X "
-      "only.");
-  ParseFlag(str, &f->detect_stack_use_after_return,
-      "detect_stack_use_after_return",
-      "Enables stack-use-after-return checking at run-time.");
-  ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
-      "Minimum fake stack size log.");
-  ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
-      "Maximum fake stack size log.");
-  ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
-      "Use mmap with 'norserve' flag to allocate fake stack.");
-  ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
-      "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
-      "bytes that will be filled with malloc_fill_byte on malloc.");
-  ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
-      "Value used to fill the newly allocated memory.");
-  ParseFlag(str, &f->exitcode, "exitcode",
-      "Override the program exit status if the tool found an error.");
-  ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
-      "If set, user may manually mark memory regions as poisoned or "
-      "unpoisoned.");
-  ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
-      "Number of seconds to sleep between printing an error report and "
-      "terminating the program. Useful for debugging purposes (e.g. when one "
-      "needs to attach gdb).");
-
-  ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
-      "Allows the users to work around the bug in Nvidia drivers prior to "
-      "295.*.");
-
-  ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
-      "If set, explicitly unmaps the (huge) shadow at exit.");
-  ParseFlag(str, &f->abort_on_error, "abort_on_error",
-      "If set, the tool calls abort() instead of _exit() after printing the "
-      "error report.");
-  ParseFlag(str, &f->print_stats, "print_stats",
-      "Print various statistics after printing an error message or if "
-      "atexit=1.");
-  ParseFlag(str, &f->print_legend, "print_legend",
-      "Print the legend for the shadow bytes.");
-  ParseFlag(str, &f->atexit, "atexit",
-      "If set, prints ASan exit stats even after program terminates "
-      "successfully.");
-
-  ParseFlag(str, &f->allow_reexec, "allow_reexec",
-      "Allow the tool to re-exec the program. This may interfere badly with "
-      "the debugger.");
-
-  ParseFlag(str, &f->print_full_thread_history,
-      "print_full_thread_history",
-      "If set, prints thread creation stacks for the threads involved in the "
-      "report and their ancestors up to the main thread.");
-
-  ParseFlag(str, &f->poison_heap, "poison_heap",
-      "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
-      "for benchmarking the allocator or instrumentator.");
-
-  ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
-      "Poison (or not) the array cookie after operator new[].");
-
-  ParseFlag(str, &f->poison_partial, "poison_partial",
-      "If true, poison partially addressable 8-byte aligned words "
-      "(default=true). This flag affects heap and global buffers, but not "
-      "stack buffers.");
-
-  ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
-      "Report errors on malloc/delete, new/free, new/delete[], etc.");
-
-  ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
-      "Report errors on mismatch betwen size of new and delete.");
-
-  ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
-      "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
-      "comparing p1 and p2.");
-
-  ParseFlag(str, &f->strict_init_order, "strict_init_order",
-      "If true, assume that dynamic initializers can never access globals from "
-      "other modules, even if the latter are already initialized.");
-
-  ParseFlag(str, &f->start_deactivated, "start_deactivated",
-      "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
-      "poisoning) to reduce memory consumption as much as possible, and "
-      "restores them to original values when the first instrumented module is "
-      "loaded into the process. This is mainly intended to be used on "
-      "Android. ");
-
-  ParseFlag(str, &f->detect_invalid_pointer_pairs,
-      "detect_invalid_pointer_pairs",
-      "If non-zero, try to detect operations like <, <=, >, >= and - on "
-      "invalid pointer pairs (e.g. when pointers belong to different objects). "
-      "The bigger the value the harder we try.");
-
-  ParseFlag(str, &f->detect_container_overflow,
-      "detect_container_overflow",
-      "If true, honor the container overflow  annotations. "
-      "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
-
-  ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
-            "If >=2, detect violation of One-Definition-Rule (ODR); "
-            "If ==1, detect ODR-violation only if the two variables "
-            "have different sizes");
-
-  ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
-      "If true, dump 16 bytes starting at the instruction that caused SEGV");
-}
-
-void InitializeFlags(Flags *f, const char *env) {
-  CommonFlags *cf = common_flags();
-  SetCommonFlagsDefaults(cf);
-  cf->detect_leaks = CAN_SANITIZE_LEAKS;
-  cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
-  cf->malloc_context_size = kDefaultMallocContextSize;
-  cf->intercept_tls_get_addr = true;
-  cf->coverage = false;
-
-  internal_memset(f, 0, sizeof(*f));
-  f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
-  f->redzone = 16;
-  f->max_redzone = 2048;
-  f->debug = false;
-  f->report_globals = 1;
-  f->check_initialization_order = false;
-  f->replace_str = true;
-  f->replace_intrin = true;
-  f->mac_ignore_invalid_free = false;
-  f->detect_stack_use_after_return = false;  // Also needs the compiler flag.
-  f->min_uar_stack_size_log = 16;  // We can't do smaller anyway.
-  f->max_uar_stack_size_log = 20;  // 1Mb per size class, i.e. ~11Mb per thread.
-  f->uar_noreserve = false;
-  f->max_malloc_fill_size = 0x1000;  // By default, fill only the first 4K.
-  f->malloc_fill_byte = 0xbe;
-  f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
-  f->allow_user_poisoning = true;
-  f->sleep_before_dying = 0;
-  f->check_malloc_usable_size = true;
-  f->unmap_shadow_on_exit = false;
-  f->abort_on_error = false;
-  f->print_stats = false;
-  f->print_legend = true;
-  f->atexit = false;
-  f->allow_reexec = true;
-  f->print_full_thread_history = true;
-  f->poison_heap = true;
-  f->poison_array_cookie = true;
-  f->poison_partial = true;
-  // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
-  // https://code.google.com/p/address-sanitizer/issues/detail?id=131
-  // https://code.google.com/p/address-sanitizer/issues/detail?id=309
-  // TODO(glider,timurrrr): Fix known issues and enable this back.
-  f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
-  f->new_delete_type_mismatch = true;
-  f->strict_memcmp = true;
-  f->strict_init_order = false;
-  f->start_deactivated = false;
-  f->detect_invalid_pointer_pairs = 0;
-  f->detect_container_overflow = true;
-  f->detect_odr_violation = 2;
-  f->dump_instruction_bytes = false;
-
-  // Override from compile definition.
-  ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
-
-  // Override from user-specified string.
-  ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
-  VReport(1, "Using the defaults from __asan_default_options: %s\n",
-          MaybeCallAsanDefaultOptions());
-
-  // Override from command line.
-  ParseFlagsFromString(f, env);
-  if (common_flags()->help) {
-    PrintFlagDescriptions();
-  }
-
-  if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
-    Report("%s: detect_leaks is not supported on this platform.\n",
-           SanitizerToolName);
-    cf->detect_leaks = false;
-  }
-
-  // Make "strict_init_order" imply "check_initialization_order".
-  // TODO(samsonov): Use a single runtime flag for an init-order checker.
-  if (f->strict_init_order) {
-    f->check_initialization_order = true;
-  }
-}
-
-// Parse flags that may change between startup and activation.
-// On Android they come from a system property.
-// On other platforms this is no-op.
-void ParseExtraActivationFlags() {
-  char buf[100];
-  GetExtraActivationFlags(buf, sizeof(buf));
-  ParseFlagsFromString(flags(), buf);
-  if (buf[0] != '\0')
-    VReport(1, "Extra activation flags: %s\n", buf);
-}
-
 // -------------------------- Globals --------------------- {{{1
 int asan_inited;
 bool asan_init_is_running;
-void (*death_callback)(void);
 
 #if !ASAN_FIXED_MAPPING
 uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@@ -343,7 +86,8 @@
 
 // ---------------------- mmap -------------------- {{{1
 // Reserve memory range [beg, end].
-static void ReserveShadowMemoryRange(uptr beg, uptr end) {
+// We need to use an inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end) {
   CHECK_EQ((beg % GetPageSizeCached()), 0);
   CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
   uptr size = end - beg + 1;
@@ -354,6 +98,10 @@
            "Perhaps you're using ulimit -v\n", size);
     Abort();
   }
+  if (common_flags()->no_huge_pages_for_shadow)
+    NoHugePagesInRegion(beg, size);
+  if (common_flags()->use_madv_dontdump)
+    DontDumpShadowMemory(beg, size);
 }
 
 // --------------- LowLevelAllocateCallback ---------- {{{1
@@ -499,7 +247,13 @@
 }
 
 static void ProtectGap(uptr a, uptr size) {
-  CHECK_EQ(a, (uptr)Mprotect(a, size));
+  void *res = Mprotect(a, size);
+  if (a == (uptr)res)
+    return;
+  Report("ERROR: Failed to protect the shadow gap. "
+         "ASan cannot proceed correctly. ABORTING.\n");
+  DumpProcessMap();
+  Die();
 }
 
 static void PrintAddressSpaceLayout() {
@@ -538,7 +292,7 @@
   Printf("\n");
   Printf("redzone=%zu\n", (uptr)flags()->redzone);
   Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
-  Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
+  Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
   Printf("malloc_context_size=%zu\n",
          (uptr)common_flags()->malloc_context_size);
 
@@ -560,8 +314,10 @@
 
   // Initialize flags. This must be done early, because most of the
   // initialization steps look at flags().
-  const char *options = GetEnv("ASAN_OPTIONS");
-  InitializeFlags(flags(), options);
+  InitializeFlags();
+
+  SetCanPoisonMemory(flags()->poison_heap);
+  SetMallocContextSize(common_flags()->malloc_context_size);
 
   InitializeHighMemEnd();
 
@@ -573,20 +329,11 @@
   SetCheckFailedCallback(AsanCheckFailed);
   SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
 
-  if (!flags()->start_deactivated)
-    ParseExtraActivationFlags();
-
   __sanitizer_set_report_path(common_flags()->log_path);
+
+  // Enable UAR detection, if required.
   __asan_option_detect_stack_use_after_return =
       flags()->detect_stack_use_after_return;
-  CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
-
-  if (options) {
-    VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
-  }
-
-  if (flags()->start_deactivated)
-    AsanStartDeactivated();
 
   // Re-exec ourselves if we need to set additional env or command line args.
   MaybeReexec();
@@ -617,8 +364,7 @@
   }
 #endif
 
-  if (common_flags()->verbosity)
-    PrintAddressSpaceLayout();
+  if (Verbosity()) PrintAddressSpaceLayout();
 
   DisableCoreDumperIfNecessary();
 
@@ -648,6 +394,8 @@
   } else {
     Report("Shadow memory range interleaves with an existing memory mapping. "
            "ASan cannot proceed correctly. ABORTING.\n");
+    Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+           shadow_start, kHighShadowEnd);
     DumpProcessMap();
     Die();
   }
@@ -655,7 +403,12 @@
   AsanTSDInit(PlatformTSDDtor);
   InstallDeadlySignalHandlers(AsanOnSIGSEGV);
 
-  InitializeAllocator();
+  AllocatorOptions allocator_options;
+  allocator_options.SetFrom(flags(), common_flags());
+  InitializeAllocator(allocator_options);
+
+  MaybeStartBackgroudThread();
+  SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
 
   // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
   // should be set to 1 prior to initializing the threads.
@@ -665,32 +418,36 @@
   if (flags()->atexit)
     Atexit(asan_atexit);
 
-  if (common_flags()->coverage) {
-    __sanitizer_cov_init();
-    Atexit(__sanitizer_cov_dump);
-  }
+  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+  // Now that ASan runtime is (mostly) initialized, deactivate it if
+  // necessary, so that it can be re-activated when requested.
+  if (flags()->start_deactivated)
+    AsanDeactivate();
 
   // interceptors
   InitTlsSize();
 
   // Create main thread.
-  AsanThread *main_thread = AsanThread::Create(0, 0);
-  CreateThreadContextArgs create_main_args = { main_thread, 0 };
-  u32 main_tid = asanThreadRegistry().CreateThread(
-      0, true, 0, &create_main_args);
-  CHECK_EQ(0, main_tid);
+  AsanThread *main_thread = AsanThread::Create(
+      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+      /* stack */ nullptr, /* detached */ true);
+  CHECK_EQ(0, main_thread->tid());
   SetCurrentThread(main_thread);
-  main_thread->ThreadStart(internal_getpid());
+  main_thread->ThreadStart(internal_getpid(),
+                           /* signal_thread_is_registered */ nullptr);
   force_interface_symbols();  // no-op.
   SanitizerInitializeUnwinder();
 
 #if CAN_SANITIZE_LEAKS
-  __lsan::InitCommonLsan(false);
+  __lsan::InitCommonLsan();
   if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
     Atexit(__lsan::DoLeakCheck);
   }
 #endif  // CAN_SANITIZE_LEAKS
 
+  InitializeSuppressions();
+
   VReport(1, "AddressSanitizer Init done\n");
 }
 
@@ -709,8 +466,7 @@
   AsanInitializer() {
     AsanCheckIncompatibleRT();
     AsanCheckDynamicRTPrereqs();
-    if (UNLIKELY(!asan_inited))
-      __asan_init();
+    AsanInitFromRtl();
   }
 };
 
@@ -722,13 +478,6 @@
 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;  // NOLINT
 
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-}  // extern "C"
-#endif
-
 int NOINLINE __asan_set_error_exit_code(int exit_code) {
   int old = flags()->exitcode;
   flags()->exitcode = exit_code;
@@ -762,7 +511,7 @@
 }
 
 void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
-  death_callback = callback;
+  SetUserDieCallback(callback);
 }
 
 // Initialize as requested from instrumented application code.
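
A note on the inclusive [beg, end] convention adopted by ReserveShadowMemoryRange above: with an exclusive upper bound, a region ending at the top of the address space cannot be expressed, because end + 1 wraps to 0. A small standalone illustration (unsigned wrap-around is well defined):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t beg = 0xffffffffffff0000ULL;
      uint64_t end = 0xffffffffffffffffULL;  // last byte of a 64-bit address space
      uint64_t size = end - beg + 1;         // still representable: 0x10000
      printf("size = 0x%llx, end + 1 = 0x%llx (wrapped)\n",
             (unsigned long long)size, (unsigned long long)(end + 1));
      return 0;
    }
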
diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc
index 8188f3b..cf7a587 100644
--- a/lib/asan/asan_stack.cc
+++ b/lib/asan/asan_stack.cc
@@ -13,6 +13,21 @@
 //===----------------------------------------------------------------------===//
 #include "asan_internal.h"
 #include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __asan {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+  atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+  return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+}  // namespace __asan
 
 // ------------------ Interface -------------- {{{1
 
diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h
index 8610ee4..122967a 100644
--- a/lib/asan/asan_stack.h
+++ b/lib/asan/asan_stack.h
@@ -21,6 +21,11 @@
 
 namespace __asan {
 
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
 // Get the stack trace with the given pc and bp.
 // The pc will be in the position 0 of the resulting stack trace.
 // The bp may refer to the current frame or to the caller's frame.
@@ -78,9 +83,10 @@
   GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0,           \
                                   common_flags()->fast_unwind_on_fatal)
 
-#define GET_STACK_TRACE_SIGNAL(pc, bp, context)                                \
+#define GET_STACK_TRACE_SIGNAL(sig)                                            \
   BufferedStackTrace stack;                                                    \
-  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,     \
+  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax,                      \
+                                  (sig).pc, (sig).bp, (sig).context,           \
                                   common_flags()->fast_unwind_on_fatal)
 
 #define GET_STACK_TRACE_FATAL_HERE                                \
@@ -92,9 +98,8 @@
 #define GET_STACK_TRACE_THREAD                                    \
   GET_STACK_TRACE(kStackTraceMax, true)
 
-#define GET_STACK_TRACE_MALLOC                                    \
-  GET_STACK_TRACE(common_flags()->malloc_context_size,            \
-                  common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_MALLOC                                                 \
+  GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
 
 #define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
 
diff --git a/lib/asan/asan_suppressions.cc b/lib/asan/asan_suppressions.cc
new file mode 100644
index 0000000..3f76e20
--- /dev/null
+++ b/lib/asan/asan_suppressions.cc
@@ -0,0 +1,114 @@
+//===-- asan_suppressions.cc ----------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Issue suppression and suppression-related functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_suppressions.h"
+
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kInterceptorName[] = "interceptor_name";
+static const char kInterceptorViaFunction[] = "interceptor_via_fun";
+static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
+static const char kODRViolation[] = "odr_violation";
+static const char *kSuppressionTypes[] = {
+    kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
+    kODRViolation};
+
+extern "C" {
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__asan_default_suppressions();
+#else
+// No weak hooks, provide an empty implementation.
+const char *__asan_default_suppressions() { return ""; }
+#endif  // SANITIZER_SUPPORTS_WEAK_HOOKS
+}  // extern "C"
+
+void InitializeSuppressions() {
+  CHECK_EQ(nullptr, suppression_ctx);
+  suppression_ctx = new (suppression_placeholder)  // NOLINT
+      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+  suppression_ctx->ParseFromFile(flags()->suppressions);
+  if (&__asan_default_suppressions)
+    suppression_ctx->Parse(__asan_default_suppressions());
+}
+
+bool IsInterceptorSuppressed(const char *interceptor_name) {
+  CHECK(suppression_ctx);
+  Suppression *s;
+  // Match "interceptor_name" suppressions.
+  return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
+}
+
+bool HaveStackTraceBasedSuppressions() {
+  CHECK(suppression_ctx);
+  return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
+         suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
+}
+
+bool IsODRViolationSuppressed(const char *global_var_name) {
+  CHECK(suppression_ctx);
+  Suppression *s;
+  // Match "odr_violation" suppressions.
+  return suppression_ctx->Match(global_var_name, kODRViolation, &s);
+}
+
+bool IsStackTraceSuppressed(const StackTrace *stack) {
+  if (!HaveStackTraceBasedSuppressions())
+    return false;
+
+  CHECK(suppression_ctx);
+  Symbolizer *symbolizer = Symbolizer::GetOrInit();
+  Suppression *s;
+  for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
+    uptr addr = stack->trace[i];
+
+    if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
+      const char *module_name;
+      uptr module_offset;
+      // Match "interceptor_via_lib" suppressions.
+      if (symbolizer->GetModuleNameAndOffsetForPC(addr, &module_name,
+                                                  &module_offset) &&
+          suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) {
+        return true;
+      }
+    }
+
+    if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
+      SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+      for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+        const char *function_name = cur->info.function;
+        if (!function_name) {
+          continue;
+        }
+        // Match "interceptor_via_fun" suppressions.
+        if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
+                                   &s)) {
+          frames->ClearAll();
+          return true;
+        }
+      }
+      frames->ClearAll();
+    }
+  }
+  return false;
+}
+
+} // namespace __asan
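
The __asan_default_suppressions() declaration above relies on weak linking: if the application does not define the hook, its address is null and the runtime skips the call. A minimal sketch of the same pattern with a hypothetical hook name, using GCC/Clang __attribute__((weak)) (MSVC has no direct equivalent, which is what the /alternatename pragmas in asan_win.cc below work around):

    #include <cstdio>

    // Hypothetical user hook; if no definition exists anywhere in the program,
    // the weak symbol resolves to a null address.
    extern "C" __attribute__((weak)) const char *my_default_suppressions();

    static const char *GetDefaultSuppressions() {
      return &my_default_suppressions ? my_default_suppressions() : "";
    }

    int main() {
      printf("suppressions: \"%s\"\n", GetDefaultSuppressions());
      return 0;
    }
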
diff --git a/lib/asan/asan_suppressions.h b/lib/asan/asan_suppressions.h
new file mode 100644
index 0000000..5246b4b
--- /dev/null
+++ b/lib/asan/asan_suppressions.h
@@ -0,0 +1,30 @@
+//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_suppressions.cc.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_SUPPRESSIONS_H
+#define ASAN_SUPPRESSIONS_H
+
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+void InitializeSuppressions();
+bool IsInterceptorSuppressed(const char *interceptor_name);
+bool HaveStackTraceBasedSuppressions();
+bool IsStackTraceSuppressed(const StackTrace *stack);
+bool IsODRViolationSuppressed(const char *global_var_name);
+
+} // namespace __asan
+
+#endif // ASAN_SUPPRESSIONS_H
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index ce53bea..9af5706 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -27,6 +27,11 @@
 
 // AsanThreadContext implementation.
 
+struct CreateThreadContextArgs {
+  AsanThread *thread;
+  StackTrace *stack;
+};
+
 void AsanThreadContext::OnCreated(void *arg) {
   CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
   if (args->stack)
@@ -75,13 +80,17 @@
 
 // AsanThread implementation.
 
-AsanThread *AsanThread::Create(thread_callback_t start_routine,
-                               void *arg) {
+AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+                               u32 parent_tid, StackTrace *stack,
+                               bool detached) {
   uptr PageSize = GetPageSizeCached();
   uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
   AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
   thread->start_routine_ = start_routine;
   thread->arg_ = arg;
+  CreateThreadContextArgs args = { thread, stack };
+  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
+                                    parent_tid, &args);
 
   return thread;
 }
@@ -155,9 +164,13 @@
   AsanPlatformThreadInit();
 }
 
-thread_return_t AsanThread::ThreadStart(uptr os_id) {
+thread_return_t AsanThread::ThreadStart(
+    uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
   Init();
   asanThreadRegistry().StartThread(tid(), os_id, 0);
+  if (signal_thread_is_registered)
+    atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
   if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
 
   if (!start_routine_) {
diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h
index bf23728..9da136c 100644
--- a/lib/asan/asan_thread.h
+++ b/lib/asan/asan_thread.h
@@ -55,12 +55,14 @@
 // AsanThread are stored in TSD and destroyed when the thread dies.
 class AsanThread {
  public:
-  static AsanThread *Create(thread_callback_t start_routine, void *arg);
+  static AsanThread *Create(thread_callback_t start_routine, void *arg,
+                            u32 parent_tid, StackTrace *stack, bool detached);
   static void TSDDtor(void *tsd);
   void Destroy();
 
   void Init();  // Should be called from the thread itself.
-  thread_return_t ThreadStart(uptr os_id);
+  thread_return_t ThreadStart(uptr os_id,
+                              atomic_uintptr_t *signal_thread_is_registered);
 
   uptr stack_top() { return stack_top_; }
   uptr stack_bottom() { return stack_bottom_; }
@@ -166,11 +168,6 @@
   AsanThread *thread;
 };
 
-struct CreateThreadContextArgs {
-  AsanThread *thread;
-  StackTrace *stack;
-};
-
 // Returns a single instance of registry.
 ThreadRegistry &asanThreadRegistry();
 
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
index 5303d1b..5b1d0da 100644
--- a/lib/asan/asan_win.cc
+++ b/lib/asan/asan_win.cc
@@ -27,13 +27,28 @@
 #include "sanitizer_common/sanitizer_mutex.h"
 
 extern "C" {
-  SANITIZER_INTERFACE_ATTRIBUTE
-  int __asan_should_detect_stack_use_after_return() {
-    __asan_init();
-    return __asan_option_detect_stack_use_after_return;
-  }
+SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_should_detect_stack_use_after_return() {
+  __asan_init();
+  return __asan_option_detect_stack_use_after_return;
 }
 
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value, which works nicely for allocator hooks and
+// __asan_default_options().
+void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
+void __sanitizer_default_free_hook(void *ptr) { }
+const char* __asan_default_default_options() { return ""; }
+const char* __asan_default_default_suppressions() { return ""; }
+void __asan_default_on_error() {}
+#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook")  // NOLINT
+#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook")      // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options")    // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions")    // NOLINT
+#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error")                  // NOLINT
+}  // extern "C"
+
 namespace __asan {
 
 // ---------------------- TSD ---------------- {{{1
@@ -60,6 +75,10 @@
   AsanThread::TSDDtor(tsd);
 }
 // ---------------------- Various stuff ---------------- {{{1
+void DisableReexec() {
+  // No need to re-exec on Windows.
+}
+
 void MaybeReexec() {
   // No need to re-exec on Windows.
 }
@@ -89,15 +108,26 @@
 
 static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
 
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo;
+  CONTEXT *context_record = (CONTEXT*)context;
+
+  uptr pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+  uptr bp = (uptr)context_record->Rbp;
+  uptr sp = (uptr)context_record->Rsp;
+#else
+  uptr bp = (uptr)context_record->Ebp;
+  uptr sp = (uptr)context_record->Esp;
+#endif
+  uptr access_addr = exception_record->ExceptionInformation[1];
+
+  return SignalContext(context, access_addr, pc, sp, bp);
+}
+
 static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
   EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
   CONTEXT *context = info->ContextRecord;
-  uptr pc = (uptr)exception_record->ExceptionAddress;
-#ifdef _WIN64
-  uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
-#else
-  uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
-#endif
 
   if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
@@ -105,8 +135,8 @@
         (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
             ? "access-violation"
             : "in-page-error";
-    uptr access_addr = exception_record->ExceptionInformation[1];
-    ReportSIGSEGV(description, pc, sp, bp, context, access_addr);
+    SignalContext sig = SignalContext::Create(exception_record, context);
+    ReportSIGSEGV(description, sig);
   }
 
   // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
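
A minimal sketch of the /alternatename mechanism used above, with hypothetical names and x86 name decoration (a cdecl symbol foo is decorated as _foo, matching the pragmas in this file; on x64 the leading underscore is dropped). MSVC-only:

    #include <cstdio>

    extern "C" const char *user_hook();  // may or may not be defined by the user
    extern "C" const char *default_hook() { return "default"; }

    // If no object file defines user_hook, the linker resolves it to default_hook.
    #pragma comment(linker, "/alternatename:_user_hook=_default_hook")  // NOLINT

    int main() {
      printf("%s\n", user_hook());  // prints "default" when user_hook is undefined
      return 0;
    }
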
diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc
index b38a2d1..7b94302 100644
--- a/lib/asan/asan_win_dll_thunk.cc
+++ b/lib/asan/asan_win_dll_thunk.cc
@@ -294,7 +294,41 @@
 INTERFACE_FUNCTION(__asan_stack_free_9)
 INTERFACE_FUNCTION(__asan_stack_free_10)
 
+// FIXME: we might want to have a sanitizer_win_dll_thunk?
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_cov)
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
+INTERFACE_FUNCTION(__sanitizer_cov_init)
 INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
+INTERFACE_FUNCTION(__sanitizer_cov_with_check)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_FUNCTION(__sanitizer_reset_coverage)
+INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
 
 // TODO(timurrrr): Add more interface functions on the as-needed basis.
 
diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc
index 3a4de7d..1945614 100644
--- a/lib/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -23,10 +23,11 @@
 // Using #ifdef rather than relying on Makefiles etc.
 // simplifies the build procedure.
 #ifdef ASAN_DYNAMIC_RUNTIME_THUNK
-extern "C" {
-__declspec(dllimport) int __asan_set_seh_filter();
-__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+#include <windows.h>
+#include <psapi.h>
 
+extern "C" {
+////////////////////////////////////////////////////////////////////////////////
 // Define a copy of __asan_option_detect_stack_use_after_return that should be
 // used when linking an MD runtime with a set of object files on Windows.
 //
@@ -37,16 +38,82 @@
 // with an MT or MD runtime and we don't want to use ugly __imp_ names on Windows
 // just to work around this issue, let's clone a variable that is
 // constant after initialization anyways.
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
 int __asan_option_detect_stack_use_after_return =
     __asan_should_detect_stack_use_after_return();
+}
 
-// Set the ASan-specific SEH handler at the end of CRT initialization of each
-// module (see asan_win.cc for the details).
+////////////////////////////////////////////////////////////////////////////////
+// For some reason, the MD CRT doesn't call the C/C++ terminators as MT does.
+// To work around this, for each DLL we schedule an atexit() call to
+// UnregisterGlobalsInRange, specifying the address range of the DLL image, to
+// unregister the globals in that range. We don't do the same
+// for the main module (.exe) as the asan_globals.cc allocator is destroyed
+// by the time UnregisterGlobalsInRange is executed.
+// See PR22545 for the details.
+namespace __asan {
+__declspec(dllimport)
+void UnregisterGlobalsInRange(void *beg, void *end);
+}
+
+namespace {
+void *this_module_base, *this_module_end;
+
+void UnregisterGlobals() {
+  __asan::UnregisterGlobalsInRange(this_module_base, this_module_end);
+}
+
+int ScheduleUnregisterGlobals() {
+  HMODULE this_module = 0;
+  // GetModuleHandleEx increments the reference counter of the DLL module, so
+  // we need to call FreeLibrary later.
+  if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+                         (LPCTSTR)&UnregisterGlobals, &this_module))
+    return 1;
+
+  // Skip the main module.
+  if (this_module == GetModuleHandle(0))
+    return 0;
+
+  MODULEINFO mi;
+  bool success =
+      GetModuleInformation(GetCurrentProcess(), this_module, &mi, sizeof(mi));
+  if (!FreeLibrary(this_module))
+    return 2;
+  if (!success)
+    return 3;
+
+  this_module_base = mi.lpBaseOfDll;
+  this_module_end = (char*)mi.lpBaseOfDll + mi.SizeOfImage;
+
+  return atexit(UnregisterGlobals);
+}
+}  // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// ASan SEH handling.
+extern "C" __declspec(dllimport) int __asan_set_seh_filter();
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+
+///////////////////////////////////////////////////////////////////////////////
+// We schedule some work at start-up by placing pointers to our callbacks into
+// the list of CRT C initializers.
+//
+// First, declare sections we'll be using:
+#pragma section(".CRT$XID", long, read)  // NOLINT
+#pragma section(".CRT$XIZ", long, read)  // NOLINT
+
+// We need to call 'atexit(UnregisterGlobals);' after atexit() is initialized
+// (.CRT$XIC) but before the C++ constructors (.CRT$XCA).
+__declspec(allocate(".CRT$XID"))
+static int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
+
+// We need to set the ASan-specific SEH handler at the end of CRT initialization
+// of each module (see also asan_win.cc).
 //
 // Unfortunately, putting a pointer to __asan_set_seh_filter into
 // __asan_intercept_seh gets optimized out, so we have to use an extra function.
-static int SetSEHFilter() { return __asan_set_seh_filter(); }
-#pragma section(".CRT$XIZ", long, read)  // NOLINT
-__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
-}
+extern "C" __declspec(allocate(".CRT$XIZ"))
+int (*__asan_seh_interceptor)() = SetSEHFilter;
+
 #endif // ASAN_DYNAMIC_RUNTIME_THUNK
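
For context, the .CRT$XI* machinery above works because the CRT walks function pointers placed in the alphabetically ordered .CRT$XIA...CRT$XIZ sections during C initialization. A minimal MSVC-only sketch using a hypothetical .CRT$XIU slot (which sorts between the XID and XIZ slots used in the patch):

    #include <cstdio>

    // C initializers return int; a non-zero value makes the CRT report a
    // startup error.
    static int early_init() {
      puts("runs during CRT C initialization, before C++ constructors");
      return 0;
    }

    #pragma section(".CRT$XIU", long, read)  // NOLINT
    __declspec(allocate(".CRT$XIU"))
    int (*early_init_ptr)() = early_init;

    int main() {
      puts("main");
      return 0;
    }
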
diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup
index a620f51..104e07b 100755
--- a/lib/asan/scripts/asan_device_setup
+++ b/lib/asan/scripts/asan_device_setup
@@ -18,6 +18,7 @@
 extra_options=
 device=
 lib=
+use_su=0
 
 function usage {
     echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]"
@@ -26,13 +27,70 @@
     echo "  --extra-options: Extra ASAN_OPTIONS."
     echo "  --device: Install to the given device. Use 'adb devices' to find"
     echo "            device-id."
+    echo "  --use-su: Use 'su -c' prefix for every adb command instead of using"
+    echo "            'adb root' once."
     echo
     exit 1
 }
 
+function adb_push {
+  if [ $use_su -eq 0 ]; then
+    $ADB push "$1" "$2"
+  else
+    local FILENAME=$(basename $1)
+    $ADB push "$1" "/data/local/tmp/$FILENAME"
+    $ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null
+    $ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\""
+    $ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\""
+  fi
+}
+
+function adb_remount {
+  if [ $use_su -eq 0 ]; then
+    $ADB remount
+  else
+    local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1`
+    if [ "$STORAGE" != "" ]; then
+      echo Remounting $STORAGE at /system
+      $ADB shell su -c "mount -o remount,rw $STORAGE /system"
+    else
+      echo Failed to get storage device name for "/system" mount point
+    fi
+  fi
+}
+
+function adb_shell {
+  if [ $use_su -eq 0 ]; then
+    $ADB shell $@
+  else
+    $ADB shell su -c "$*"
+  fi
+}
+
+function adb_root {
+  if [ $use_su -eq 0 ]; then
+    $ADB root
+  fi
+}
+
+function adb_wait_for_device {
+  $ADB wait-for-device
+}
+
+function adb_pull {
+  if [ $use_su -eq 0 ]; then
+    $ADB pull "$1" "$2"
+  else
+    local FILENAME=$(basename $1)
+    $ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null
+    $ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" &&
+    $ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\""
+  fi
+}
+
 function get_device_arch { # OUTVAR
     local _outvar=$1
-    local _ABI=$($ADB shell getprop ro.product.cpu.abi)
+    local _ABI=$(adb_shell getprop ro.product.cpu.abi)
     local _ARCH=
     if [[ $_ABI == x86* ]]; then
         _ARCH=i686
@@ -74,6 +132,9 @@
       fi
       device="$1"
       ;;
+    --use-su)
+      use_su=1
+      ;;
     *)
       usage
       ;;
@@ -86,11 +147,25 @@
     ADB="$ADB -s $device"
 fi
 
+if [ $use_su -eq 1 ]; then
+  # Test if 'su' is present on the device
+  SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'`
+  if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then
+    echo "ERROR: Cannot use 'su -c':"
+    echo "$ adb shell su -c \"echo foo\""
+    echo $SU_TEST_OUT
+    echo "Check that the 'su' binary is correctly installed on the device, or"
+    echo "omit the --use-su flag."
+    exit 1
+  fi
+fi
+
 echo '>> Remounting /system rw'
-$ADB root
-$ADB wait-for-device
-$ADB remount
-$ADB wait-for-device
+adb_wait_for_device
+adb_root
+adb_wait_for_device
+adb_remount
+adb_wait_for_device
 
 get_device_arch ARCH
 echo "Target architecture: $ARCH"
@@ -99,22 +174,24 @@
 if [[ x$revert == xyes ]]; then
     echo '>> Uninstalling ASan'
 
-    if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
+    if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
         echo '>> Pre-L device detected.'
-        $ADB shell mv /system/bin/app_process.real /system/bin/app_process
-        $ADB shell rm /system/bin/asanwrapper
-        $ADB shell rm /system/lib/$ASAN_RT
+        adb_shell mv /system/bin/app_process.real /system/bin/app_process
+        adb_shell rm /system/bin/asanwrapper
     else
-        $ADB shell rm /system/bin/app_process.wrap
-        $ADB shell rm /system/bin/asanwrapper
-        $ADB shell rm /system/lib/$ASAN_RT
-        $ADB shell rm /system/bin/app_process
-        $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
+        adb_shell rm /system/bin/app_process.wrap
+        adb_shell rm /system/bin/asanwrapper
+        adb_shell rm /system/bin/app_process
+        adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
     fi
 
     echo '>> Restarting shell'
-    $ADB shell stop
-    $ADB shell start
+    adb_shell stop
+    adb_shell start
+
+    # Remove the library as the last step so that the 'su' binary still has a
+    # chance to execute without problems.
+    adb_shell rm /system/lib/$ASAN_RT
 
     echo '>> Done'
     exit 0
@@ -145,28 +222,28 @@
 TMPDIR="$TMPDIRBASE/new"
 mkdir "$TMPDIROLD"
 
-RELEASE=$($ADB shell getprop ro.build.version.release)
+RELEASE=$(adb_shell getprop ro.build.version.release)
 PRE_L=0
 if echo "$RELEASE" | grep '^4\.' >&/dev/null; then
     PRE_L=1
 fi
 
-if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
+if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
 
-    if $ADB pull /system/bin/app_process.real /dev/null >&/dev/null; then
+    if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then
         echo '>> Old-style ASan installation detected. Reverting.'
-        $ADB shell mv /system/bin/app_process.real /system/bin/app_process
+        adb_shell mv /system/bin/app_process.real /system/bin/app_process
     fi
 
     echo '>> Pre-L device detected. Setting up app_process symlink.'
-    $ADB shell mv /system/bin/app_process /system/bin/app_process32
-    $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
+    adb_shell mv /system/bin/app_process /system/bin/app_process32
+    adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
 fi
 
 echo '>> Copying files from the device'
-$ADB pull /system/bin/app_process.wrap "$TMPDIROLD" || true
-$ADB pull /system/bin/asanwrapper "$TMPDIROLD" || true
-$ADB pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
+adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true
+adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true
+adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
 cp -r "$TMPDIROLD" "$TMPDIR"
 
 if [[ -f "$TMPDIR/app_process.wrap" ]]; then
@@ -184,7 +261,7 @@
 ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0
 
 # On Android-L not allowing user segv handler breaks some applications.
-if $ADB shell 'echo $LD_PRELOAD' | grep libsigchain.so >&/dev/null; then
+if [[ PRE_L -eq 0 ]]; then
     ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1"
 fi
 
@@ -212,52 +289,52 @@
 
 if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
     echo '>> Pushing files to the device'
-    $ADB push "$TMPDIR/$ASAN_RT" /system/lib/
-    $ADB push "$TMPDIR/app_process.wrap" /system/bin/app_process.wrap
-    $ADB push "$TMPDIR/asanwrapper" /system/bin/asanwrapper
+    adb_push "$TMPDIR/$ASAN_RT" /system/lib/
+    adb_push "$TMPDIR/app_process.wrap" /system/bin
+    adb_push "$TMPDIR/asanwrapper" /system/bin
 
-    $ADB shell rm /system/bin/app_process
-    $ADB shell ln -s /system/bin/app_process.wrap /system/bin/app_process
+    adb_shell rm /system/bin/app_process
+    adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
 
-    $ADB shell chown root.shell \
+    adb_shell chown root.shell \
         /system/lib/"$ASAN_RT" \
         /system/bin/app_process.wrap \
         /system/bin/asanwrapper
-    $ADB shell chmod 644 \
+    adb_shell chmod 644 \
         /system/lib/"$ASAN_RT"
-    $ADB shell chmod 755 \
+    adb_shell chmod 755 \
         /system/bin/app_process.wrap \
         /system/bin/asanwrapper
 
     # Make SELinux happy by keeping app_process wrapper and the shell
     # it runs on in zygote domain.
     ENFORCING=0
-    if $ADB shell getenforce | grep Enforcing >/dev/null; then
+    if adb_shell getenforce | grep Enforcing >/dev/null; then
         # Sometimes shell is not allowed to change file contexts.
         # Temporarily switch to permissive.
         ENFORCING=1
-        $ADB shell setenforce 0
+        adb_shell setenforce 0
     fi
 
-    $ADB shell cp /system/bin/sh /system/bin/sh-from-zygote
+    adb_shell cp /system/bin/sh /system/bin/sh-from-zygote
 
     if [[ PRE_L -eq 1 ]]; then
         CTX=u:object_r:system_file:s0
     else
         CTX=u:object_r:zygote_exec:s0
     fi
-    $ADB shell chcon $CTX \
+    adb_shell chcon $CTX \
         /system/bin/sh-from-zygote \
         /system/bin/app_process.wrap \
         /system/bin/app_process32
 
     if [ $ENFORCING == 1 ]; then
-        $ADB shell setenforce 1
+        adb_shell setenforce 1
     fi
 
     echo '>> Restarting shell (asynchronous)'
-    $ADB shell stop
-    $ADB shell start
+    adb_shell stop
+    adb_shell start
 
     echo '>> Please wait until the device restarts'
 else
diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py
index 76de60a..59fceaa 100755
--- a/lib/asan/scripts/asan_symbolize.py
+++ b/lib/asan/scripts/asan_symbolize.py
@@ -11,11 +11,9 @@
 import bisect
 import getopt
 import os
-import pty
 import re
 import subprocess
 import sys
-import termios
 
 symbolizers = {}
 DEBUG = False
@@ -66,10 +64,12 @@
 
 
 class LLVMSymbolizer(Symbolizer):
-  def __init__(self, symbolizer_path, addr):
+  def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
     super(LLVMSymbolizer, self).__init__()
     self.symbolizer_path = symbolizer_path
-    self.default_arch = guess_arch(addr)
+    self.default_arch = default_arch
+    self.system = system
+    self.dsym_hints = dsym_hints
     self.pipe = self.open_llvm_symbolizer()
 
   def open_llvm_symbolizer(self):
@@ -79,6 +79,9 @@
            '--functions=short',
            '--inlining=true',
            '--default-arch=%s' % self.default_arch]
+    if self.system == 'Darwin':
+      for hint in self.dsym_hints:
+        cmd.append('--dsym-hint=%s' % hint)
     if DEBUG:
       print ' '.join(cmd)
     try:
@@ -94,7 +97,7 @@
       return None
     result = []
     try:
-      symbolizer_input = '%s %s' % (binary, offset)
+      symbolizer_input = '"%s" %s' % (binary, offset)
       if DEBUG:
         print symbolizer_input
       print >> self.pipe.stdin, symbolizer_input
@@ -116,14 +119,14 @@
     return result
 
 
-def LLVMSymbolizerFactory(system, addr):
+def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
   symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
   if not symbolizer_path:
     symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
     if not symbolizer_path:
       # Assume llvm-symbolizer is in PATH.
       symbolizer_path = 'llvm-symbolizer'
-  return LLVMSymbolizer(symbolizer_path, addr)
+  return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
 
 
 class Addr2LineSymbolizer(Symbolizer):
@@ -166,6 +169,9 @@
   output.  Uses pty to trick the child into providing unbuffered output.
   """
   def __init__(self, args, close_stderr=False):
+    # Local imports so that the script can start on Windows.
+    import pty
+    import termios
     pid, fd = pty.fork()
     if pid == 0:
       # We're the child. Transfer control to command.
@@ -335,26 +341,55 @@
 
 
 class SymbolizationLoop(object):
-  def __init__(self, binary_name_filter=None):
-    # Used by clients who may want to supply a different binary name.
-    # E.g. in Chrome several binaries may share a single .dSYM.
-    self.binary_name_filter = binary_name_filter
-    self.system = os.uname()[0]
-    if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
-      raise Exception('Unknown system')
-    self.llvm_symbolizer = None
-    self.frame_no = 0
+  def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
+    if sys.platform == 'win32':
+      # ASan on Windows uses dbghelp.dll to symbolize in-process, which works
+      # even in sandboxed processes.  Nothing needs to be done here.
+      self.process_line = self.process_line_echo
+    else:
+      # Used by clients who may want to supply a different binary name.
+      # E.g. in Chrome several binaries may share a single .dSYM.
+      self.binary_name_filter = binary_name_filter
+      self.dsym_hint_producer = dsym_hint_producer
+      self.system = os.uname()[0]
+      if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
+        raise Exception('Unknown system')
+      self.llvm_symbolizers = {}
+      self.last_llvm_symbolizer = None
+      self.dsym_hints = set([])
+      self.frame_no = 0
+      self.process_line = self.process_line_posix
 
   def symbolize_address(self, addr, binary, offset):
-    # Initialize llvm-symbolizer lazily.
-    if not self.llvm_symbolizer:
-      self.llvm_symbolizer = LLVMSymbolizerFactory(self.system, addr)
+    # On non-Darwin (i.e. on platforms without .dSYM debug info) always use
+    # a single symbolizer binary.
+    # On Darwin, if the dsym hint producer is present:
+    #  1. check whether we've seen this binary already; if so,
+    #     use |llvm_symbolizers[binary]|, which has already loaded the debug
+    #     info for this binary (might not be the case for
+    #     |last_llvm_symbolizer|);
+    #  2. otherwise check if we've seen all the hints for this binary already;
+    #     if so, reuse |last_llvm_symbolizer| which has the full set of hints;
+    #  3. otherwise create a new symbolizer and pass all currently known
+    #     .dSYM hints to it.
+    if not binary in self.llvm_symbolizers:
+      use_new_symbolizer = True
+      if self.system == 'Darwin' and self.dsym_hint_producer:
+        dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
+        use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
+        self.dsym_hints |= dsym_hints_for_binary
+      if self.last_llvm_symbolizer and not use_new_symbolizer:
+        self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
+      else:
+        self.last_llvm_symbolizer = LLVMSymbolizerFactory(
+            self.system, guess_arch(addr), self.dsym_hints)
+        self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
     # Use the chain of symbolizers:
     # Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
     # (fall back to next symbolizer if the previous one fails).
     if not binary in symbolizers:
       symbolizers[binary] = ChainSymbolizer(
-          [BreakpadSymbolizerFactory(binary), self.llvm_symbolizer])
+          [BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
     result = symbolizers[binary].symbolize(addr, binary, offset)
     if result is None:
       # Initialize system symbolizer only if other symbolizers failed.
@@ -377,14 +412,14 @@
 
   def process_logfile(self):
     self.frame_no = 0
-    while True:
-      line = logfile.readline()
-      if not line:
-        break
+    for line in logfile:
       processed = self.process_line(line)
       print '\n'.join(processed)
 
-  def process_line(self, line):
+  def process_line_echo(self, line):
+    return [line.rstrip()]
+
+  def process_line_posix(self, line):
     self.current_line = line.rstrip()
     #0 0x7f6e35cf2e45  (/blah/foo.so+0x11fe45)
     stack_trace_line_format = (
@@ -409,20 +444,23 @@
 
 
 if __name__ == '__main__':
-  parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
-  description='ASan symbolization script',
-  epilog='''Example of use:
-  asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log''')
+  parser = argparse.ArgumentParser(
+      formatter_class=argparse.RawDescriptionHelpFormatter,
+      description='ASan symbolization script',
+      epilog='Example of use:\n'
+             'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
+             '-s "$HOME/SymbolFiles" < asan.log')
   parser.add_argument('path_to_cut', nargs='*',
-    help='pattern to be cut from the result file path ')
+                      help='pattern to be cut from the result file path ')
   parser.add_argument('-d','--demangle', action='store_true',
-    help='demangle function names')
+                      help='demangle function names')
   parser.add_argument('-s', metavar='SYSROOT',
-    help='set path to sysroot for sanitized binaries')
+                      help='set path to sysroot for sanitized binaries')
   parser.add_argument('-c', metavar='CROSS_COMPILE',
-    help='set prefix for binutils')
-  parser.add_argument('-l','--logfile', default=sys.stdin, type=argparse.FileType('r'),
-    help='set log file name to parse, default is stdin')
+                      help='set prefix for binutils')
+  parser.add_argument('-l','--logfile', default=sys.stdin,
+                      type=argparse.FileType('r'),
+                      help='set log file name to parse, default is stdin')
   args = parser.parse_args()
   if args.path_to_cut:
     fix_filename_patterns = args.path_to_cut
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
index 7b36371..513d128 100644
--- a/lib/asan/tests/CMakeLists.txt
+++ b/lib/asan/tests/CMakeLists.txt
@@ -30,7 +30,8 @@
   -fno-rtti
   -O2
   -Wno-format
-  -Werror=sign-compare)
+  -Werror=sign-compare
+  -Wno-non-virtual-dtor)
 append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros ASAN_UNITTEST_COMMON_CFLAGS)
 
 # -gline-tables-only must be enough for ASan, so use it if possible.
@@ -46,6 +47,11 @@
   -DASAN_HAS_EXCEPTIONS=1
   -DASAN_UAR=0)
 
+if(APPLE)
+  list(APPEND ASAN_UNITTEST_COMMON_CFLAGS ${DARWIN_osx_CFLAGS})
+  list(APPEND ASAN_UNITTEST_COMMON_LINKFLAGS ${DARWIN_osx_LINKFLAGS})
+endif()
+
 set(ASAN_BLACKLIST_FILE "${CMAKE_CURRENT_SOURCE_DIR}/asan_test.ignore")
 set(ASAN_UNITTEST_INSTRUMENTED_CFLAGS
   ${ASAN_UNITTEST_COMMON_CFLAGS}
@@ -117,7 +123,7 @@
 # Link ASan unit test for a given architecture from a set
 # of objects in with given linker flags.
 macro(add_asan_test test_suite test_name arch kind)
-  parse_arguments(TEST "OBJECTS;LINKFLAGS" "WITH_TEST_RUNTIME" ${ARGN})
+  parse_arguments(TEST "OBJECTS;LINKFLAGS;SUBDIR" "WITH_TEST_RUNTIME" ${ARGN})
   get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
   set(TEST_DEPS ${TEST_OBJECTS})
   if(NOT COMPILER_RT_STANDALONE_BUILD)
@@ -132,6 +138,7 @@
     endif()
   endif()
   add_compiler_rt_test(${test_suite} ${test_name}
+                       SUBDIR ${TEST_SUBDIR}
                        OBJECTS ${TEST_OBJECTS}
                        DEPS ${TEST_DEPS}
                        LINK_FLAGS ${TEST_LINKFLAGS}
@@ -141,6 +148,11 @@
 # Main AddressSanitizer unit tests.
 add_custom_target(AsanUnitTests)
 set_target_properties(AsanUnitTests PROPERTIES FOLDER "ASan unit tests")
+# AddressSanitizer unit tests with dynamic runtime (on platforms where it's
+# not the default).
+add_custom_target(AsanDynamicUnitTests)
+set_target_properties(AsanDynamicUnitTests
+  PROPERTIES FOLDER "ASan unit tests with dynamic runtime")
 # ASan benchmarks (not actively used now).
 add_custom_target(AsanBenchmarks)
 set_target_properties(AsanBenchmarks PROPERTIES FOLDER "Asan benchmarks")
@@ -182,11 +194,15 @@
     asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test_helpers.mm ${arch} ${kind}
                  ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC ${ARGN})
   endif()
-  add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test" ${arch} ${kind}
+  file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/default")
+  add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test"
+                ${arch} ${kind} SUBDIR "default"
                 OBJECTS ${ASAN_INST_TEST_OBJECTS}
                 LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
-  if(COMPILER_RT_BUILD_SHARED_ASAN)
-    add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Dynamic-Test" ${arch} ${kind}
+  if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME)
+    file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynamic")
+    add_asan_test(AsanDynamicUnitTests "Asan-${arch}${kind}-Dynamic-Test"
+                  ${arch} ${kind} SUBDIR "dynamic"
                   OBJECTS ${ASAN_INST_TEST_OBJECTS}
                   LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
   endif()
@@ -220,7 +236,8 @@
     asan_compile(ASAN_NOINST_TEST_OBJECTS ${src} ${arch} ${kind}
                  ${ASAN_UNITTEST_COMMON_CFLAGS} ${ARGN})
   endforeach()
-  add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test" ${arch} ${kind}
+  add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test"
+                ${arch} ${kind} SUBDIR "default"
                 OBJECTS ${ASAN_NOINST_TEST_OBJECTS}
                 LINKFLAGS ${ASAN_UNITTEST_NOINST_LINKFLAGS}
                 WITH_TEST_RUNTIME)
@@ -231,14 +248,10 @@
     asan_compile(ASAN_BENCHMARKS_OBJECTS ${src} ${arch} ${kind}
                  ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} ${ARGN})
   endforeach()
-  add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark" ${arch} ${kind}
+  add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark"
+                ${arch} ${kind} SUBDIR "default"
                 OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
                 LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
-  if(COMPILER_RT_BUILD_SHARED_ASAN)
-    add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Dynamic-Benchmark" ${arch} ${kind}
-                  OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
-                  LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
-  endif()
 endmacro()
 
 if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index 50fdf11..a34c852 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -87,7 +87,7 @@
 }
 
 TEST(AddressSanitizerInterface, GetHeapSizeTest) {
-  // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
+  // ASan allocator does not keep huge chunks in free list, but unmaps them.
   // The chunk should be greater than the quarantine size,
   // otherwise it will be stuck in quarantine instead of being unmapped.
   static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index bb6af45..6a428fb 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -31,18 +31,12 @@
 // in this test. The static runtime library is linked explicitly (without
 // -fsanitize=address), thus the interceptors do not work correctly on OS X.
 
-#if !defined(_WIN32)
-extern "C" {
-// Set specific ASan options for uninstrumented unittest.
-const char* __asan_default_options() {
-  return "allow_reexec=0";
-}
-}  // extern "C"
-#endif
-
 // Make sure __asan_init is called before any test case is run.
 struct AsanInitCaller {
-  AsanInitCaller() { __asan_init(); }
+  AsanInitCaller() {
+    __asan::DisableReexec();
+    __asan_init();
+  }
 };
 static AsanInitCaller asan_init_caller;
 
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 67bcbac..952b05e 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -603,7 +603,8 @@
 }
 
 #if !defined(__ANDROID__) && !defined(__arm__) && \
-    !defined(__powerpc64__) && !defined(__powerpc__)
+    !defined(__powerpc64__) && !defined(__powerpc__) && \
+    !defined(__aarch64__)
 // Does not work on Power and ARM:
 // https://code.google.com/p/address-sanitizer/issues/detail?id=185
 TEST(AddressSanitizer, BuiltinLongJmpTest) {
@@ -1284,3 +1285,33 @@
   ASSERT_EQ(0, res);
 }
 #endif
+
+#if SANITIZER_TEST_HAS_PRINTF_L
+static int vsnprintf_l_wrapper(char *s, size_t n,
+                               locale_t l, const char *format, ...) {
+  va_list va;
+  va_start(va, format);
+  int res = vsnprintf_l(s, n, l, format, va);
+  va_end(va);
+  return res;
+}
+
+TEST(AddressSanitizer, snprintf_l) {
+  char buff[5];
+  // Check that snprintf_l() works fine with Asan.
+  int res = snprintf_l(buff, 5,
+                       _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()");
+  EXPECT_EQ(12, res);
+  // Check that vsnprintf_l() works fine with Asan.
+  res = vsnprintf_l_wrapper(buff, 5,
+                            _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()");
+  EXPECT_EQ(13, res);
+
+  EXPECT_DEATH(snprintf_l(buff, 10,
+                          _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"),
+                "AddressSanitizer: stack-buffer-overflow");
+  EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10,
+                                  _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"),
+                "AddressSanitizer: stack-buffer-overflow");
+}
+#endif
diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt
index 999faa8..4d102c6 100644
--- a/lib/builtins/CMakeLists.txt
+++ b/lib/builtins/CMakeLists.txt
@@ -79,7 +79,6 @@
   floatuntidf.c
   floatuntisf.c
   floatuntixf.c
-  gcc_personality_v0.c
   int_util.c
   lshrdi3.c
   lshrti3.c
@@ -137,6 +136,12 @@
   umodsi3.c
   umodti3.c)
 
+if (HAVE_UNWIND_H)
+  set(GENERIC_SOURCES
+      ${GENERIC_SOURCES}
+      gcc_personality_v0.c)
+endif ()
+
 set(x86_64_SOURCES
   x86_64/floatdidf.c
   x86_64/floatdisf.c
diff --git a/lib/builtins/assembly.h b/lib/builtins/assembly.h
index 8688a9b..8bb0ddc 100644
--- a/lib/builtins/assembly.h
+++ b/lib/builtins/assembly.h
@@ -28,6 +28,7 @@
 // tell linker it can break up file at label boundaries
 #define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
 #define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
 
 #elif defined(__ELF__)
 
@@ -39,10 +40,11 @@
 #else
 #define SYMBOL_IS_FUNC(name) .type name,@function
 #endif
+#define CONST_SECTION .section .rodata
 
 #else // !__APPLE__ && !__ELF__
 
-#define HIDDEN_DIRECTIVE(name)
+#define HIDDEN(name)
 #define LOCAL_LABEL(name) .L ## name
 #define FILE_LEVEL_DIRECTIVE
 #define SYMBOL_IS_FUNC(name)                                                   \
@@ -50,6 +52,7 @@
     .scl 2 SEPARATOR                                                           \
     .type 32 SEPARATOR                                                         \
   .endef
+#define CONST_SECTION .section .rdata,"rd"
 
 #endif
 
diff --git a/lib/builtins/atomic.c b/lib/builtins/atomic.c
index 02429a6..35c8837 100644
--- a/lib/builtins/atomic.c
+++ b/lib/builtins/atomic.c
@@ -28,20 +28,14 @@
 #include <stdint.h>
 #include <string.h>
 
+#include "assembly.h"
+
 // Clang objects if you redefine a builtin.  This little hack allows us to
 // define a function with the same name as an intrinsic.
-#if __APPLE__
-// mach-o has extra leading underscore
-#pragma redefine_extname __atomic_load_c ___atomic_load
-#pragma redefine_extname __atomic_store_c ___atomic_store
-#pragma redefine_extname __atomic_exchange_c ___atomic_exchange
-#pragma redefine_extname __atomic_compare_exchange_c ___atomic_compare_exchange
-#else
-#pragma redefine_extname __atomic_load_c __atomic_load
-#pragma redefine_extname __atomic_store_c __atomic_store
-#pragma redefine_extname __atomic_exchange_c __atomic_exchange
-#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
-#endif
+#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
+#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
+#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
+#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
 
 /// Number of locks.  This allocates one page on 32-bit platforms, two on
 /// 64-bit.  This can be specified externally if a different trade between
diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c
index d329b80..8dc0fb1 100644
--- a/lib/builtins/clear_cache.c
+++ b/lib/builtins/clear_cache.c
@@ -13,14 +13,19 @@
 #if __APPLE__
   #include <libkern/OSCacheControl.h>
 #endif
+#if defined(__FreeBSD__) && defined(__arm__)
+  #include <sys/types.h>
+  #include <machine/sysarch.h>
+#endif
+
 #if defined(__NetBSD__) && defined(__arm__)
   #include <machine/sysarch.h>
 #endif
 
-#if defined(__ANDROID__) && defined(__mips__)
+#if defined(__mips__)
   #include <sys/cachectl.h>
   #include <sys/syscall.h>
-  #ifdef __LP64__
+  #if defined(__ANDROID__) && defined(__LP64__)
     /*
      * clear_mips_cache - Invalidates instruction cache for Mips.
      */
@@ -84,7 +89,7 @@
  * so there is nothing to do
  */
 #elif defined(__arm__) && !defined(__APPLE__)
-    #if defined(__NetBSD__)
+    #if defined(__FreeBSD__) || defined(__NetBSD__)
         struct arm_sync_icache_args arg;
 
         arg.addr = (uintptr_t)start;
@@ -92,7 +97,7 @@
 
         sysarch(ARM_SYNC_ICACHE, &arg);
     #elif defined(__ANDROID__)
-         const register int start_reg __asm("r0") = (int) (intptr_t) start;
+         register int start_reg __asm("r0") = (int) (intptr_t) start;
          const register int end_reg __asm("r1") = (int) (intptr_t) end;
          const register int flags __asm("r2") = 0;
          const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
@@ -104,10 +109,10 @@
     #else
         compilerrt_abort();
     #endif
-#elif defined(__ANDROID__) && defined(__mips__)
+#elif defined(__mips__)
   const uintptr_t start_int = (uintptr_t) start;
   const uintptr_t end_int = (uintptr_t) end;
-    #ifdef __LP64__
+    #if defined(__ANDROID__) && defined(__LP64__)
         // Call synci implementation for short address range.
         const uintptr_t address_range_limit = 256;
         if ((end_int - start_int) <= address_range_limit) {
diff --git a/lib/builtins/gcc_personality_v0.c b/lib/builtins/gcc_personality_v0.c
index 869f417..4b95cfd 100644
--- a/lib/builtins/gcc_personality_v0.c
+++ b/lib/builtins/gcc_personality_v0.c
@@ -11,47 +11,7 @@
 
 #include "int_lib.h"
 
-/*
- * _Unwind_* stuff based on C++ ABI public documentation
- * http://refspecs.freestandards.org/abi-eh-1.21.html
- */
-
-typedef enum {
-    _URC_NO_REASON = 0,
-    _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
-    _URC_FATAL_PHASE2_ERROR = 2,
-    _URC_FATAL_PHASE1_ERROR = 3,
-    _URC_NORMAL_STOP = 4,
-    _URC_END_OF_STACK = 5,
-    _URC_HANDLER_FOUND = 6,
-    _URC_INSTALL_CONTEXT = 7,
-    _URC_CONTINUE_UNWIND = 8
-} _Unwind_Reason_Code;
-
-typedef enum {
-    _UA_SEARCH_PHASE = 1,
-    _UA_CLEANUP_PHASE = 2,
-    _UA_HANDLER_FRAME = 4,
-    _UA_FORCE_UNWIND = 8,
-    _UA_END_OF_STACK = 16
-} _Unwind_Action;
-
-typedef struct _Unwind_Context* _Unwind_Context_t;
-
-struct _Unwind_Exception {
-    uint64_t                exception_class;
-    void                    (*exception_cleanup)(_Unwind_Reason_Code reason, 
-                                                 struct _Unwind_Exception* exc);
-    uintptr_t                private_1;    
-    uintptr_t                private_2;    
-};
-
-COMPILER_RT_ABI  const uint8_t*    _Unwind_GetLanguageSpecificData(_Unwind_Context_t c);
-COMPILER_RT_ABI  void              _Unwind_SetGR(_Unwind_Context_t c, int i, uintptr_t n);
-COMPILER_RT_ABI  void              _Unwind_SetIP(_Unwind_Context_t, uintptr_t new_value);
-COMPILER_RT_ABI  uintptr_t         _Unwind_GetIP(_Unwind_Context_t context);
-COMPILER_RT_ABI  uintptr_t         _Unwind_GetRegionStart(_Unwind_Context_t context);
-
+#include <unwind.h>
 
 /*
  * Pointer encodings documented at:
@@ -185,12 +145,12 @@
 COMPILER_RT_ABI _Unwind_Reason_Code
 __gcc_personality_sj0(int version, _Unwind_Action actions,
          uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
-         _Unwind_Context_t context)
+         struct _Unwind_Context *context)
 #else
 COMPILER_RT_ABI _Unwind_Reason_Code
 __gcc_personality_v0(int version, _Unwind_Action actions,
          uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
-         _Unwind_Context_t context)
+         struct _Unwind_Context *context)
 #endif
 {
     /* Since C does not have catch clauses, there is nothing to do during */
@@ -199,7 +159,7 @@
         return _URC_CONTINUE_UNWIND;
         
     /* There is nothing to do if there is no LSDA for this frame. */
-    const uint8_t* lsda = _Unwind_GetLanguageSpecificData(context);
+    const uint8_t* lsda = (uint8_t*)_Unwind_GetLanguageSpecificData(context);
     if ( lsda == (uint8_t*) 0 )
         return _URC_CONTINUE_UNWIND;
 
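
With the hand-rolled _Unwind_* declarations removed, gcc_personality_v0.c now takes these types and accessors from the system <unwind.h>. A small sketch of that interface in use, here for a plain backtrace walk rather than the personality routine itself (illustrative only; links against the platform's default unwinder):

    // Sketch: the <unwind.h> interface the personality routine now relies on.
    #include <unwind.h>
    #include <cstdio>

    static _Unwind_Reason_Code TraceFrame(struct _Unwind_Context *ctx, void *) {
      std::printf("frame ip = %p\n",
                  reinterpret_cast<void *>(_Unwind_GetIP(ctx)));
      return _URC_NO_REASON;  // keep unwinding
    }

    int main() {
      _Unwind_Backtrace(TraceFrame, nullptr);
      return 0;
    }
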
diff --git a/lib/builtins/i386/floatdidf.S b/lib/builtins/i386/floatdidf.S
index f4f5d01..dcc32f8 100644
--- a/lib/builtins/i386/floatdidf.S
+++ b/lib/builtins/i386/floatdidf.S
@@ -7,13 +7,7 @@
 
 #ifdef __i386__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop52:
diff --git a/lib/builtins/i386/floatundidf.S b/lib/builtins/i386/floatundidf.S
index 676fed0..8058c2a 100644
--- a/lib/builtins/i386/floatundidf.S
+++ b/lib/builtins/i386/floatundidf.S
@@ -17,13 +17,7 @@
 
 #ifdef __i386__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop52:
diff --git a/lib/builtins/i386/floatundisf.S b/lib/builtins/i386/floatundisf.S
index 5b81620..94c97e2 100644
--- a/lib/builtins/i386/floatundisf.S
+++ b/lib/builtins/i386/floatundisf.S
@@ -18,7 +18,7 @@
 
 #ifdef __i386__
 
-.const
+CONST_SECTION
 .balign 3
 
 		.quad	0x43f0000000000000
@@ -52,13 +52,7 @@
 
 #ifdef __i386__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop52:
diff --git a/lib/builtins/i386/floatundixf.S b/lib/builtins/i386/floatundixf.S
index d60ad7d..814b52f 100644
--- a/lib/builtins/i386/floatundixf.S
+++ b/lib/builtins/i386/floatundixf.S
@@ -7,13 +7,7 @@
 
 #ifdef __i386__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop52:
diff --git a/lib/builtins/int_endianness.h b/lib/builtins/int_endianness.h
index 4b35bde..7995ddb 100644
--- a/lib/builtins/int_endianness.h
+++ b/lib/builtins/int_endianness.h
@@ -16,6 +16,20 @@
 #ifndef INT_ENDIANNESS_H
 #define INT_ENDIANNESS_H
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    defined(__ORDER_LITTLE_ENDIAN__)
+
+/* Clang and GCC provide built-in endianness definitions. */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#endif /* __BYTE_ORDER__ */
+
+#else /* Compilers other than Clang or GCC. */
+
 #if defined(__SVR4) && defined(__sun)
 #include <sys/byteorder.h>
 
@@ -84,18 +98,6 @@
 
 /* .. */
 
-#if defined(__linux__)
-
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define _YUGA_LITTLE_ENDIAN 0
-#define _YUGA_BIG_ENDIAN    1
-#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define _YUGA_LITTLE_ENDIAN 1
-#define _YUGA_BIG_ENDIAN    0
-#endif /* __BYTE_ORDER__ */
-
-#endif /* GNU/Linux */
-
 #if defined(_WIN32)
 
 #define _YUGA_LITTLE_ENDIAN 1
@@ -103,6 +105,8 @@
 
 #endif /* Windows */
 
+#endif /* Clang or GCC. */
+
 /* . */
 
 #if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
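
The reshuffled header now prefers the compiler's __BYTE_ORDER__ / __ORDER_*_ENDIAN__ builtins over the per-OS probes that follow. A small sketch comparing that compile-time detection with a runtime byte probe (SKETCH_LITTLE_ENDIAN is an illustrative stand-in for _YUGA_LITTLE_ENDIAN):

    // Sketch: compile-time endianness via compiler builtins vs. a runtime probe.
    #include <cstdint>
    #include <cstdio>

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    #define SKETCH_LITTLE_ENDIAN 1
    #else
    #define SKETCH_LITTLE_ENDIAN 0
    #endif

    int main() {
      const std::uint32_t probe = 1;
      // On a little-endian machine the least significant byte comes first.
      const int runtime_little =
          *reinterpret_cast<const unsigned char *>(&probe) == 1;
      std::printf("compile-time little-endian: %d, runtime little-endian: %d\n",
                  SKETCH_LITTLE_ENDIAN, runtime_little);
      return 0;
    }
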
diff --git a/lib/builtins/x86_64/floatundidf.S b/lib/builtins/x86_64/floatundidf.S
index d54b974..3cd5d02 100644
--- a/lib/builtins/x86_64/floatundidf.S
+++ b/lib/builtins/x86_64/floatundidf.S
@@ -17,13 +17,7 @@
 
 #ifdef __x86_64__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop52:
diff --git a/lib/builtins/x86_64/floatundisf.S b/lib/builtins/x86_64/floatundisf.S
index e41f118..61952f4 100644
--- a/lib/builtins/x86_64/floatundisf.S
+++ b/lib/builtins/x86_64/floatundisf.S
@@ -7,13 +7,7 @@
 
 #ifdef __x86_64__
 
-#if defined(__APPLE__)
-	.literal4
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 two:
diff --git a/lib/builtins/x86_64/floatundixf.S b/lib/builtins/x86_64/floatundixf.S
index 91bdc8a..92961c8 100644
--- a/lib/builtins/x86_64/floatundixf.S
+++ b/lib/builtins/x86_64/floatundixf.S
@@ -7,13 +7,7 @@
 
 #ifdef __x86_64__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.section .rodata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
 
 	.balign 16
 twop64:
@@ -42,13 +36,8 @@
 
 #ifdef __x86_64__
 
-#if defined(__APPLE__)
-	.const
-#elif defined(__ELF__)
-	.rdata
-#else
-	.section .rdata,"rd"
-#endif
+CONST_SECTION
+
 	.balign 4
 twop52:
 	.quad 0x4330000000000000
diff --git a/lib/dfsan/CMakeLists.txt b/lib/dfsan/CMakeLists.txt
index daad07f..24ea876 100644
--- a/lib/dfsan/CMakeLists.txt
+++ b/lib/dfsan/CMakeLists.txt
@@ -6,13 +6,13 @@
   dfsan_custom.cc
   dfsan_interceptors.cc)
 set(DFSAN_COMMON_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+append_no_rtti_flag(DFSAN_COMMON_CFLAGS)
 # Prevent clang from generating libc calls.
 append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding DFSAN_COMMON_CFLAGS)
 
 # Static runtime library.
 add_custom_target(dfsan)
-set(arch "x86_64")
-if(CAN_TARGET_${arch})
+foreach(arch ${DFSAN_SUPPORTED_ARCH})
   set(DFSAN_CFLAGS ${DFSAN_COMMON_CFLAGS})
   append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE DFSAN_CFLAGS)
   add_compiler_rt_runtime(clang_rt.dfsan-${arch} ${arch} STATIC
@@ -30,7 +30,7 @@
   add_dependencies(dfsan
     clang_rt.dfsan-${arch}
     clang_rt.dfsan-${arch}-symbols)
-endif()
+endforeach()
 
 set(dfsan_abilist_filename ${COMPILER_RT_OUTPUT_DIR}/dfsan_abilist.txt)
 add_custom_target(dfsan_abilist ALL
diff --git a/lib/dfsan/Makefile.mk b/lib/dfsan/Makefile.mk
deleted file mode 100644
index 4aeaac4..0000000
--- a/lib/dfsan/Makefile.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-#===- lib/dfsan/Makefile.mk --------------------------------*- Makefile -*--===#
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := dfsan
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-
-# Define a convenience variable for all the dfsan functions.
-DfsanFunctions := $(Sources:%.cc=%)
diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc
index dcc52b1..de5b2ce 100644
--- a/lib/dfsan/dfsan.cc
+++ b/lib/dfsan/dfsan.cc
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
 #include "sanitizer_common/sanitizer_libc.h"
 
 #include "dfsan/dfsan.h"
@@ -63,12 +64,37 @@
 // account for the double byte representation of shadow labels and move the
 // address into the shadow memory range.  See the function shadow_for below.
 
+// On Linux/MIPS64, memory is laid out as follows:
+//
+// +--------------------+ 0x10000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xF000008000 (kAppAddr)
+// |                    |
+// |       unused       |
+// |                    |
+// +--------------------+ 0x2200000000 (kUnusedAddr)
+// |    union table     |
+// +--------------------+ 0x2000000000 (kUnionTableAddr)
+// |   shadow memory    |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
 typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
 
+#if defined(__x86_64__)
 static const uptr kShadowAddr = 0x10000;
 static const uptr kUnionTableAddr = 0x200000000000;
 static const uptr kUnusedAddr = kUnionTableAddr + sizeof(dfsan_union_table_t);
 static const uptr kAppAddr = 0x700000008000;
+#elif defined(__mips64)
+static const uptr kShadowAddr = 0x10000;
+static const uptr kUnionTableAddr = 0x2000000000;
+static const uptr kUnusedAddr = kUnionTableAddr + sizeof(dfsan_union_table_t);
+static const uptr kAppAddr = 0xF000008000;
+#else
+# error "DFSan not supported for this platform!"
+#endif
 
 static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
   return &(*(dfsan_union_table_t *) kUnionTableAddr)[l1][l2];
@@ -231,7 +257,7 @@
   return __dfsan_union_load(shadow_for(addr), size);
 }
 
-SANITIZER_INTERFACE_ATTRIBUTE
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
 const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) {
   return &__dfsan_label_info[label];
 }
@@ -285,16 +311,24 @@
   }
 }
 
-static void InitializeFlags(Flags &f, const char *env) {
-  f.warn_unimplemented = true;
-  f.warn_nonzero_labels = false;
-  f.strict_data_dependencies = true;
-  f.dump_labels_at_exit = "";
+void Flags::SetDefaults() {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
 
-  ParseFlag(env, &f.warn_unimplemented, "warn_unimplemented", "");
-  ParseFlag(env, &f.warn_nonzero_labels, "warn_nonzero_labels", "");
-  ParseFlag(env, &f.strict_data_dependencies, "strict_data_dependencies", "");
-  ParseFlag(env, &f.dump_labels_at_exit, "dump_labels_at_exit", "");
+static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
+
+static void InitializeFlags() {
+  FlagParser parser;
+  RegisterDfsanFlags(&parser, &flags());
+  flags().SetDefaults();
+  parser.ParseString(GetEnv("DFSAN_OPTIONS"));
 }
 
 static void dfsan_fini() {
@@ -329,8 +363,7 @@
   if (!(init_addr >= kUnusedAddr && init_addr < kAppAddr))
     Mprotect(kUnusedAddr, kAppAddr - kUnusedAddr);
 
-  InitializeFlags(flags(), GetEnv("DFSAN_OPTIONS"));
-
+  InitializeFlags();
   InitializeInterceptors();
 
   // Register the fini callback to run when the program terminates successfully
diff --git a/lib/dfsan/dfsan.h b/lib/dfsan/dfsan.h
index 1b6c150..ceba353 100644
--- a/lib/dfsan/dfsan.h
+++ b/lib/dfsan/dfsan.h
@@ -44,7 +44,11 @@
 void InitializeInterceptors();
 
 inline dfsan_label *shadow_for(void *ptr) {
+#if defined(__x86_64__)
   return (dfsan_label *) ((((uptr) ptr) & ~0x700000000000) << 1);
+#elif defined(__mips64)
+  return (dfsan_label *) ((((uptr) ptr) & ~0xF000000000) << 1);
+#endif
 }
 
 inline const dfsan_label *shadow_for(const void *ptr) {
@@ -52,17 +56,11 @@
 }
 
 struct Flags {
-  // Whether to warn on unimplemented functions.
-  bool warn_unimplemented;
-  // Whether to warn on non-zero labels.
-  bool warn_nonzero_labels;
-  // Whether to propagate labels only when there is an obvious data dependency
-  // (e.g., when comparing strings, ignore the fact that the output of the
-  // comparison might be data-dependent on the content of the strings). This
-  // applies only to the custom functions defined in 'custom.c'.
-  bool strict_data_dependencies;
-  // The path of the file where to dump the labels when the program terminates.
-  const char* dump_labels_at_exit;
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+
+  void SetDefaults();
 };
 
 extern Flags flags_data;
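
The layout comments and shadow_for() above define the application-to-shadow translation: mask off the high application bits, then shift left by one because each application byte is described by a two-byte dfsan_label. A worked sketch of the x86-64 case using the constants from the hunks above (illustrative, not the runtime code path):

    // Sketch of DFSan's app->shadow address arithmetic (x86-64 constants).
    #include <cstdint>
    #include <cstdio>

    using uptr = std::uintptr_t;

    static uptr ShadowFor(uptr app_addr) {
      // Clear the 0x700000000000 application base, then double the offset
      // because shadow labels are 16 bits per application byte.
      return (app_addr & ~0x700000000000ULL) << 1;
    }

    int main() {
      const uptr app = 0x700000010000ULL;  // an address inside app memory
      std::printf("app 0x%llx -> shadow 0x%llx\n",
                  static_cast<unsigned long long>(app),
                  static_cast<unsigned long long>(ShadowFor(app)));
      return 0;
    }
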
diff --git a/lib/dfsan/dfsan_custom.cc b/lib/dfsan/dfsan_custom.cc
index 839a399..318ecd6 100644
--- a/lib/dfsan/dfsan_custom.cc
+++ b/lib/dfsan/dfsan_custom.cc
@@ -314,11 +314,12 @@
 SANITIZER_INTERFACE_ATTRIBUTE void *
 __dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
               dfsan_label flag_label, dfsan_label *ret_label) {
-  link_map *map = (link_map *)dlopen(filename, flag);
+  void *handle = dlopen(filename, flag);
+  link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
   if (map)
     ForEachMappedRegion(map, unpoison);
   *ret_label = 0;
-  return (void *)map;
+  return handle;
 }
 
 struct pthread_create_info {
diff --git a/lib/dfsan/dfsan_flags.inc b/lib/dfsan/dfsan_flags.inc
new file mode 100644
index 0000000..24fbfcb
--- /dev/null
+++ b/lib/dfsan/dfsan_flags.inc
@@ -0,0 +1,32 @@
+//===-- dfsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DFSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_FLAG
+# error "Define DFSAN_FLAG prior to including this file!"
+#endif
+
+// DFSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+DFSAN_FLAG(bool, warn_unimplemented, true,
+           "Whether to warn on unimplemented functions.")
+DFSAN_FLAG(bool, warn_nonzero_labels, false,
+           "Whether to warn on non-zero labels.")
+DFSAN_FLAG(
+    bool, strict_data_dependencies, true,
+    "Whether to propagate labels only when there is an obvious data dependency "
+    "(e.g., when comparing strings, ignore the fact that the output of the "
+    "comparison might be data-dependent on the content of the strings). This "
+    "applies only to the custom functions defined in 'custom.c'.")
+DFSAN_FLAG(const char *, dump_labels_at_exit, "", "The path of the file where "
+                                                  "to dump the labels when the "
+                                                  "program terminates.")
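
Both the DFSan and LSan changes replace hand-written flag structs and ParseFlag calls with a single .inc file expanded through an X-macro: the same DFSAN_FLAG/LSAN_FLAG line declares the struct field, sets its default, and registers it with the parser. A reduced sketch of the pattern (the list macro and flag names are illustrative; the real code re-includes the .inc file around each macro definition and registers into sanitizer_common's FlagParser):

    // Reduced sketch of the X-macro flag pattern used by the *_flags.inc files.
    #include <cstdio>

    #define MY_FLAG_LIST(X)                                              \
      X(bool, warn_unimplemented, true, "Warn on unimplemented calls")   \
      X(int, max_leaks, 0, "Maximum number of leaks to report")

    struct Flags {
    #define DECLARE_FLAG(Type, Name, Default, Desc) Type Name;
      MY_FLAG_LIST(DECLARE_FLAG)
    #undef DECLARE_FLAG

      void SetDefaults() {
    #define SET_DEFAULT(Type, Name, Default, Desc) Name = Default;
        MY_FLAG_LIST(SET_DEFAULT)
    #undef SET_DEFAULT
      }
    };

    static void PrintFlagDescriptions() {
    #define PRINT_FLAG(Type, Name, Default, Desc) \
      std::printf("  %s - %s\n", #Name, Desc);
      MY_FLAG_LIST(PRINT_FLAG)
    #undef PRINT_FLAG
    }

    int main() {
      Flags f;
      f.SetDefaults();
      PrintFlagDescriptions();
      std::printf("warn_unimplemented=%d max_leaks=%d\n",
                  f.warn_unimplemented, f.max_leaks);
      return 0;
    }
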
diff --git a/lib/lsan/Makefile.mk b/lib/lsan/Makefile.mk
index 2a6b41c..5e70634 100644
--- a/lib/lsan/Makefile.mk
+++ b/lib/lsan/Makefile.mk
@@ -20,9 +20,6 @@
 Dependencies += $(wildcard $(Dir)/../interception/*.h)
 Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
 
-# Define a convenience variable for all the lsan functions.
-LsanFunctions := $(Sources:%.cc=%)
-
 # lsan functions used in another sanitizers.
 LsanCommonSources := $(foreach file,$(wildcard $(Dir)/lsan_common*.cc),$(notdir $(file)))
 LsanCommonFunctions := $(LsanCommonSources:%.cc=%)
diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc
index 1598fca..6018f7b 100644
--- a/lib/lsan/lsan.cc
+++ b/lib/lsan/lsan.cc
@@ -15,6 +15,7 @@
 #include "lsan.h"
 
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "lsan_allocator.h"
 #include "lsan_common.h"
@@ -34,13 +35,42 @@
 
 using namespace __lsan;  // NOLINT
 
+static void InitializeFlags() {
+  // Set all the default values.
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+    cf.malloc_context_size = 30;
+    cf.detect_leaks = true;
+    OverrideCommonFlags(cf);
+  }
+
+  Flags *f = flags();
+  f->SetDefaults();
+
+  FlagParser parser;
+  RegisterLsanFlags(&parser, f);
+  RegisterCommonFlags(&parser);
+
+  parser.ParseString(GetEnv("LSAN_OPTIONS"));
+
+  SetVerbosity(common_flags()->verbosity);
+
+  if (Verbosity()) ReportUnrecognizedFlags();
+
+  if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
 extern "C" void __lsan_init() {
   CHECK(!lsan_init_is_running);
   if (lsan_inited)
     return;
   lsan_init_is_running = true;
   SanitizerToolName = "LeakSanitizer";
-  InitCommonLsan(true);
+  InitializeFlags();
+  InitCommonLsan();
   InitializeAllocator();
   InitTlsSize();
   InitializeInterceptors();
@@ -52,6 +82,9 @@
 
   if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
     Atexit(DoLeakCheck);
+
+  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
   lsan_inited = true;
   lsan_init_is_running = false;
 }
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 8be2a2a..67125db 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -25,10 +25,6 @@
 
 namespace __lsan {
 
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
-
 struct ChunkMetadata {
   bool allocated : 8;  // Must be first.
   ChunkTag tag : 2;
@@ -36,8 +32,22 @@
   u32 stack_trace_id;
 };
 
+#if defined(__mips64)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+    PrimaryAllocator;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
 typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
         sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+#endif
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator<> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -47,7 +57,7 @@
 static THREADLOCAL AllocatorCache cache;
 
 void InitializeAllocator() {
-  allocator.Init();
+  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
 }
 
 void AllocatorThreadFinish() {
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 746244c..a6119af 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -16,11 +16,11 @@
 
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
 #include "sanitizer_common/sanitizer_suppressions.h"
 #include "sanitizer_common/sanitizer_report_decorator.h"
 
@@ -36,52 +36,17 @@
 
 Flags lsan_flags;
 
-static void InitializeFlags(bool standalone) {
-  Flags *f = flags();
-  // Default values.
-  f->report_objects = false;
-  f->resolution = 0;
-  f->max_leaks = 0;
-  f->exitcode = 23;
-  f->use_registers = true;
-  f->use_globals = true;
-  f->use_stacks = true;
-  f->use_tls = true;
-  f->use_root_regions = true;
-  f->use_unaligned = false;
-  f->use_poisoned = false;
-  f->log_pointers = false;
-  f->log_threads = false;
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
 
-  const char *options = GetEnv("LSAN_OPTIONS");
-  if (options) {
-    ParseFlag(options, &f->use_registers, "use_registers", "");
-    ParseFlag(options, &f->use_globals, "use_globals", "");
-    ParseFlag(options, &f->use_stacks, "use_stacks", "");
-    ParseFlag(options, &f->use_tls, "use_tls", "");
-    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
-    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
-    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
-    ParseFlag(options, &f->report_objects, "report_objects", "");
-    ParseFlag(options, &f->resolution, "resolution", "");
-    CHECK_GE(&f->resolution, 0);
-    ParseFlag(options, &f->max_leaks, "max_leaks", "");
-    CHECK_GE(&f->max_leaks, 0);
-    ParseFlag(options, &f->log_pointers, "log_pointers", "");
-    ParseFlag(options, &f->log_threads, "log_threads", "");
-    ParseFlag(options, &f->exitcode, "exitcode", "");
-  }
-
-  // Set defaults for common flags (only in standalone mode) and parse
-  // them from LSAN_OPTIONS.
-  CommonFlags *cf = common_flags();
-  if (standalone) {
-    SetCommonFlagsDefaults(cf);
-    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
-    cf->malloc_context_size = 30;
-    cf->detect_leaks = true;
-  }
-  ParseCommonFlagsFromString(cf, options);
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
 }
 
 #define LOG_POINTERS(...)                           \
@@ -94,14 +59,23 @@
     if (flags()->log_threads) Report(__VA_ARGS__); \
   } while (0);
 
-static bool suppressions_inited = false;
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
 
 void InitializeSuppressions() {
-  CHECK(!suppressions_inited);
-  SuppressionContext::InitIfNecessary();
+  CHECK_EQ(nullptr, suppression_ctx);
+  suppression_ctx = new (suppression_placeholder) // NOLINT
+      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+  suppression_ctx->ParseFromFile(flags()->suppressions);
   if (&__lsan_default_suppressions)
-    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
-  suppressions_inited = true;
+    suppression_ctx->Parse(__lsan_default_suppressions());
+}
+
+static SuppressionContext *GetSuppressionContext() {
+  CHECK(suppression_ctx);
+  return suppression_ctx;
 }
 
 struct RootRegion {
@@ -117,8 +91,7 @@
   root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
 }
 
-void InitCommonLsan(bool standalone) {
-  InitializeFlags(standalone);
+void InitCommonLsan() {
   InitializeRootRegions();
   if (common_flags()->detect_leaks) {
     // Initialization which can fail or print warnings should only be done if
@@ -141,9 +114,11 @@
   // bound on heap addresses.
   const uptr kMinAddress = 4 * 4096;
   if (p < kMinAddress) return false;
-#ifdef __x86_64__
+#if defined(__x86_64__)
   // Accept only canonical form user-space addresses.
   return ((p >> 47) == 0);
+#elif defined(__mips64)
+  return ((p >> 40) == 0);
 #else
   return true;
 #endif
@@ -367,7 +342,7 @@
   LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
-    uptr resolution = flags()->resolution;
+    u32 resolution = flags()->resolution;
     u32 stack_trace_id = 0;
     if (resolution > 0) {
       StackTrace stack = StackDepotGet(m.stack_trace_id());
@@ -383,7 +358,7 @@
 
 static void PrintMatchedSuppressions() {
   InternalMmapVector<Suppression *> matched(1);
-  SuppressionContext::Get()->GetMatched(&matched);
+  GetSuppressionContext()->GetMatched(&matched);
   if (!matched.size())
     return;
   const char *line = "-----------------------------------------------------";
@@ -424,7 +399,7 @@
   param.success = false;
   LockThreadRegistry();
   LockAllocator();
-  StopTheWorld(DoLeakCheckCallback, &param);
+  DoStopTheWorld(DoLeakCheckCallback, &param);
   UnlockAllocator();
   UnlockThreadRegistry();
 
@@ -457,30 +432,27 @@
 }
 
 static Suppression *GetSuppressionForAddr(uptr addr) {
-  Suppression *s;
+  Suppression *s = nullptr;
 
   // Suppress by module name.
   const char *module_name;
   uptr module_offset;
-  if (Symbolizer::GetOrInit()
-          ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
-      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
+  SuppressionContext *suppressions = GetSuppressionContext();
+  if (Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(addr, &module_name,
+                                                           &module_offset) &&
+      suppressions->Match(module_name, kSuppressionLeak, &s))
     return s;
 
   // Suppress by file or function name.
-  static const uptr kMaxAddrFrames = 16;
-  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
-  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
-  uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
-      addr, addr_frames.data(), kMaxAddrFrames);
-  for (uptr i = 0; i < addr_frames_num; i++) {
-    if (SuppressionContext::Get()->Match(addr_frames[i].function,
-                                         SuppressionLeak, &s) ||
-        SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
-                                         &s))
-      return s;
+  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
+        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+      break;
+    }
   }
-  return 0;
+  frames->ClearAll();
+  return s;
 }
 
 static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
@@ -593,10 +565,9 @@
       bytes += leaks_[i].total_size;
       allocations += leaks_[i].hit_count;
   }
-  InternalScopedBuffer<char> summary(kMaxSummaryLength);
-  internal_snprintf(summary.data(), summary.size(),
-                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
-                    allocations);
+  InternalScopedString summary(kMaxSummaryLength);
+  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+                 allocations);
   ReportErrorSummary(summary.data());
 }
 
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 86ff12d..4f9d24f 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -19,14 +19,20 @@
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 
-#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
+    && (SANITIZER_WORDSIZE == 64)
 #define CAN_SANITIZE_LEAKS 1
 #else
 #define CAN_SANITIZE_LEAKS 0
 #endif
 
+namespace __sanitizer {
+class FlagParser;
+}
+
 namespace __lsan {
 
 // Chunk tags.
@@ -38,44 +44,19 @@
 };
 
 struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+  void SetDefaults();
   uptr pointer_alignment() const {
     return use_unaligned ? 1 : sizeof(uptr);
   }
-
-  // Print addresses of leaked objects after main leak report.
-  bool report_objects;
-  // Aggregate two objects into one leak if this many stack frames match. If
-  // zero, the entire stack trace must match.
-  int resolution;
-  // The number of leaks reported.
-  int max_leaks;
-  // If nonzero kill the process with this exit code upon finding leaks.
-  int exitcode;
-
-  // Flags controlling the root set of reachable memory.
-  // Global variables (.data and .bss).
-  bool use_globals;
-  // Thread stacks.
-  bool use_stacks;
-  // Thread registers.
-  bool use_registers;
-  // TLS and thread-specific storage.
-  bool use_tls;
-  // Regions added via __lsan_register_root_region().
-  bool use_root_regions;
-
-  // Consider unaligned pointers valid.
-  bool use_unaligned;
-  // Consider pointers found in poisoned memory to be valid.
-  bool use_poisoned;
-
-  // Debug logging.
-  bool log_pointers;
-  bool log_threads;
 };
 
 extern Flags lsan_flags;
 inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
 
 struct Leak {
   u32 id;
@@ -119,6 +100,8 @@
 void InitializePlatformSpecificModules();
 void ProcessGlobalRegions(Frontier *frontier);
 void ProcessPlatformSpecificAllocations(Frontier *frontier);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
 
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
@@ -131,7 +114,7 @@
 };
 
 // Functions called from the parent tool.
-void InitCommonLsan(bool standalone);
+void InitCommonLsan();
 void DoLeakCheck();
 bool DisabledInThisThread();
 
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index ba51868..813e0b7 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -85,10 +85,6 @@
 // Scans global variables for heap pointers.
 void ProcessGlobalRegions(Frontier *frontier) {
   if (!flags()->use_globals) return;
-  // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
-  // deadlocking by running this under StopTheWorld. However, the lock is
-  // reentrant, so we should be able to fix this by acquiring the lock before
-  // suspending threads.
   dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
 }
 
@@ -153,5 +149,30 @@
   ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
 }
 
+struct DoStopTheWorldParam {
+  StopTheWorldCallback callback;
+  void *argument;
+};
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+                                  void *data) {
+  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+  StopTheWorld(param->callback, param->argument);
+  return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+  DoStopTheWorldParam param = {callback, argument};
+  dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
 }  // namespace __lsan
 #endif  // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
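
The comment block above carries the key reasoning of this change: StopTheWorld() is invoked from inside a dl_iterate_phdr() callback so that the loader lock is already held (reentrantly) by the thread that owns the tracer. A minimal standalone sketch of the same shape, outside the sanitizer runtime (the WithLoaderLock/Payload names are invented for illustration), could look like:

    // Sketch only (not part of the patch): run `work` from inside a
    // dl_iterate_phdr() callback so the dynamic loader lock is held for the
    // duration of the payload.  dl_iterate_phdr() and <link.h> are
    // glibc/Linux facilities; everything else here is made up.
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <link.h>

    struct Payload {
      void (*work)(void *);
      void *arg;
    };

    static int RunUnderLoaderLock(struct dl_phdr_info *, size_t, void *data) {
      Payload *p = static_cast<Payload *>(data);
      p->work(p->arg);  // e.g. suspend threads and scan their memory here
      return 1;         // non-zero stops the iteration after the first module
    }

    void WithLoaderLock(void (*work)(void *), void *arg) {
      Payload payload = {work, arg};
      dl_iterate_phdr(RunUnderLoaderLock, &payload);
    }

Returning a non-zero value from the callback ends the iteration immediately, so the payload runs exactly once, mirroring DoStopTheWorldCallback() above.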
diff --git a/lib/lsan/lsan_flags.inc b/lib/lsan/lsan_flags.inc
new file mode 100644
index 0000000..b19b345
--- /dev/null
+++ b/lib/lsan/lsan_flags.inc
@@ -0,0 +1,45 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+          "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+    int, resolution, 0,
+    "Aggregate two objects into one leak if this many stack frames match. If "
+    "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0,
+          "Maximum number of leaks to report (zero means unlimited).")
+LSAN_FLAG(int, exitcode, 23,
+          "If nonzero kill the process with this exit code upon finding leaks.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+          "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+          "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+          "Root set: include regions added via __lsan_register_root_region().")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+          "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging of scanned pointers.")
+LSAN_FLAG(bool, log_threads, false, "Debug logging of thread ranges.")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
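
The new lsan_flags.inc is an X-macro list: the same LSAN_FLAG(...) entries are expanded once into Flags fields (see the lsan_common.h hunk above), once in SetDefaults(), and once when registering parser handlers. A self-contained toy version of the pattern (hypothetical MYTOOL_FLAG_LIST name; the real code keeps the list in a separate .inc file and re-#includes it) is:

    // Stripped-down X-macro illustration, not the real LSan files.
    // The flag list is written once and expanded three different ways.
    #include <cstdio>

    #define MYTOOL_FLAG_LIST(X)                                       \
      X(bool, use_stacks, true, "Scan thread stacks.")                \
      X(int, max_leaks, 0, "Cap on reported leaks (0 = unlimited).")

    struct Flags {
      // 1. Declare one field per flag.
    #define DECLARE_FIELD(Type, Name, Default, Desc) Type Name;
      MYTOOL_FLAG_LIST(DECLARE_FIELD)
    #undef DECLARE_FIELD

      // 2. Reset every field to its default value.
      void SetDefaults() {
    #define SET_DEFAULT(Type, Name, Default, Desc) Name = Default;
        MYTOOL_FLAG_LIST(SET_DEFAULT)
    #undef SET_DEFAULT
      }
    };

    // 3. Print the self-documenting flag table.
    static void PrintFlagDescriptions() {
    #define PRINT_FLAG(Type, Name, Default, Desc) \
      std::printf("%s: %s\n", #Name, Desc);
      MYTOOL_FLAG_LIST(PRINT_FLAG)
    #undef PRINT_FLAG
    }

    int main() {
      Flags f;
      f.SetDefaults();
      PrintFlagDescriptions();
      return f.max_leaks;  // 0
    }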
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index b01bbf8..ba2519d 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -215,9 +215,9 @@
   int tid = 0;
   while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
     internal_sched_yield();
-  atomic_store(&p->tid, 0, memory_order_release);
   SetCurrentThread(tid);
   ThreadStart(tid, GetTid());
+  atomic_store(&p->tid, 0, memory_order_release);
   return callback(param);
 }
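
The one-line move above is a synchronization fix: the release store that lets the creating thread continue now happens only after SetCurrentThread()/ThreadStart() have finished, so (assuming the pthread_create interceptor, which is not shown in this hunk, spins until tid drops back to zero) the new thread is fully registered before pthread_create returns. The generic shape of that acquire/release handshake, in plain C++ rather than the sanitizer primitives:

    // Illustration only: two-phase handshake where the child does its
    // registration work before releasing the parent.
    #include <atomic>
    #include <cstdio>
    #include <thread>

    static std::atomic<int> published_tid{0};
    static bool registered = false;  // stands in for ThreadStart() effects

    static void ChildBody() {
      int tid;
      while ((tid = published_tid.load(std::memory_order_acquire)) == 0) {
      }                              // wait for the parent to publish the id
      registered = (tid == 42);      // "SetCurrentThread(); ThreadStart();"
      // Only now release the parent: everything above happens-before the
      // parent's observation that published_tid is back to zero.
      published_tid.store(0, std::memory_order_release);
    }

    int main() {
      std::thread child(ChildBody);
      published_tid.store(42, std::memory_order_release);  // publish the id
      while (published_tid.load(std::memory_order_acquire) != 0) {
      }                              // wait until the child is registered
      std::printf("registered = %d\n", registered);        // always 1
      child.join();
      return 0;
    }

Because the plain write to registered is sequenced before the release store, the acquire load that observes the zero also observes the registration, which is exactly the property the reordered interceptor needs.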
 
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt
index 90d9fac..ccf47fc 100644
--- a/lib/msan/CMakeLists.txt
+++ b/lib/msan/CMakeLists.txt
@@ -10,6 +10,7 @@
   msan_new_delete.cc
   msan_report.cc
   msan_thread.cc
+  msan_poisoning.cc
   )
 
 set(MSAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
diff --git a/lib/msan/Makefile.mk b/lib/msan/Makefile.mk
deleted file mode 100644
index 99e3b03..0000000
--- a/lib/msan/Makefile.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#===- lib/msan/Makefile.mk ---------------------------------*- Makefile -*--===#
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := msan
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../interception/*.h)
-Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-
-# Define a convenience variable for all the msan functions.
-MsanFunctions := $(Sources:%.cc=%)
diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc
index 09622c4..caa7736 100644
--- a/lib/msan/msan.cc
+++ b/lib/msan/msan.cc
@@ -16,16 +16,17 @@
 #include "msan_chained_origin_depot.h"
 #include "msan_origin.h"
 #include "msan_thread.h"
+#include "msan_poisoning.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 
-
 // ACHTUNG! No system header includes in this file.
 
 using namespace __sanitizer;
@@ -96,19 +97,81 @@
 static uptr StackOriginPC[kNumStackOriginDescrs];
 static atomic_uint32_t NumStackOriginDescrs;
 
-static void ParseFlagsFromString(Flags *f, const char *str) {
-  CommonFlags *cf = common_flags();
-  ParseCommonFlagsFromString(cf, str);
-  ParseFlag(str, &f->poison_heap_with_zeroes, "poison_heap_with_zeroes", "");
-  ParseFlag(str, &f->poison_stack_with_zeroes, "poison_stack_with_zeroes", "");
-  ParseFlag(str, &f->poison_in_malloc, "poison_in_malloc", "");
-  ParseFlag(str, &f->poison_in_free, "poison_in_free", "");
-  ParseFlag(str, &f->exit_code, "exit_code", "");
+void Flags::SetDefaults() {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+}
+
+// keep_going is an old name for halt_on_error, and it has the inverse
+// meaning.
+class FlagHandlerKeepGoing : public FlagHandlerBase {
+  bool *halt_on_error_;
+
+ public:
+  explicit FlagHandlerKeepGoing(bool *halt_on_error)
+      : halt_on_error_(halt_on_error) {}
+  bool Parse(const char *value) final {
+    bool tmp;
+    FlagHandler<bool> h(&tmp);
+    if (!h.Parse(value)) return false;
+    *halt_on_error_ = !tmp;
+    return true;
+  }
+};
+
+static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+  FlagHandlerKeepGoing *fh_keep_going = new (FlagParser::Alloc)  // NOLINT
+      FlagHandlerKeepGoing(&f->halt_on_error);
+  parser->RegisterHandler("keep_going", fh_keep_going,
+                          "deprecated, use halt_on_error");
+}
+
+static void InitializeFlags() {
+  Flags *f = flags();
+  FlagParser parser;
+  RegisterMsanFlags(&parser, f);
+  RegisterCommonFlags(&parser);
+
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
+    cf.malloc_context_size = 20;
+    cf.handle_ioctl = true;
+    // FIXME: test and enable.
+    cf.check_printf = false;
+    cf.intercept_tls_get_addr = true;
+    OverrideCommonFlags(cf);
+  }
+
+  f->SetDefaults();
+
+  // Override from user-specified string.
+  if (__msan_default_options)
+    parser.ParseString(__msan_default_options());
+
+  const char *msan_options = GetEnv("MSAN_OPTIONS");
+  parser.ParseString(msan_options);
+  VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
+
+  SetVerbosity(common_flags()->verbosity);
+
+  if (Verbosity()) ReportUnrecognizedFlags();
+
+  if (common_flags()->help) parser.PrintFlagDescriptions();
+
+  // Check flag values:
   if (f->exit_code < 0 || f->exit_code > 127) {
     Printf("Exit code not in [0, 128) range: %d\n", f->exit_code);
     Die();
   }
-  ParseFlag(str, &f->origin_history_size, "origin_history_size", "");
   if (f->origin_history_size < 0 ||
       f->origin_history_size > Origin::kMaxDepth) {
     Printf(
@@ -117,8 +180,6 @@
         f->origin_history_size, Origin::kMaxDepth);
     Die();
   }
-  ParseFlag(str, &f->origin_history_per_stack_limit,
-            "origin_history_per_stack_limit", "");
   // Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
   // StackDepotHandle::inc_use_count_unsafe.
   if (f->origin_history_per_stack_limit < 0 ||
@@ -129,51 +190,7 @@
         f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
     Die();
   }
-
-  ParseFlag(str, &f->report_umrs, "report_umrs", "");
-  ParseFlag(str, &f->wrap_signals, "wrap_signals", "");
-  ParseFlag(str, &f->print_stats, "print_stats", "");
-  ParseFlag(str, &f->atexit, "atexit", "");
-  ParseFlag(str, &f->store_context_size, "store_context_size", "");
   if (f->store_context_size < 1) f->store_context_size = 1;
-
-  // keep_going is an old name for halt_on_error,
-  // and it has inverse meaning.
-  f->halt_on_error = !f->halt_on_error;
-  ParseFlag(str, &f->halt_on_error, "keep_going", "");
-  f->halt_on_error = !f->halt_on_error;
-  ParseFlag(str, &f->halt_on_error, "halt_on_error", "");
-}
-
-static void InitializeFlags(Flags *f, const char *options) {
-  CommonFlags *cf = common_flags();
-  SetCommonFlagsDefaults(cf);
-  cf->external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
-  cf->malloc_context_size = 20;
-  cf->handle_ioctl = true;
-  // FIXME: test and enable.
-  cf->check_printf = false;
-  cf->intercept_tls_get_addr = true;
-
-  internal_memset(f, 0, sizeof(*f));
-  f->poison_heap_with_zeroes = false;
-  f->poison_stack_with_zeroes = false;
-  f->poison_in_malloc = true;
-  f->poison_in_free = true;
-  f->exit_code = 77;
-  f->origin_history_size = Origin::kMaxDepth;
-  f->origin_history_per_stack_limit = 20000;
-  f->report_umrs = true;
-  f->wrap_signals = true;
-  f->print_stats = false;
-  f->atexit = false;
-  f->halt_on_error = !&__msan_keep_going;
-  f->store_context_size = 20;
-
-  // Override from user-specified string.
-  if (__msan_default_options)
-    ParseFlagsFromString(f, __msan_default_options());
-  ParseFlagsFromString(f, options);
 }
 
 void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
@@ -205,10 +222,10 @@
   GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
 
   u32 report_origin =
-    (__msan_get_track_origins() && Origin(origin).isValid()) ? origin : 0;
+    (__msan_get_track_origins() && Origin::isValidId(origin)) ? origin : 0;
   ReportUMR(&stack, report_origin);
 
-  if (__msan_get_track_origins() && !Origin(origin).isValid()) {
+  if (__msan_get_track_origins() && !Origin::isValidId(origin)) {
     Printf(
         "  ORIGIN: invalid (%x). Might be a bug in MemorySanitizer origin "
         "tracking.\n    This could still be a bug in your code, too!\n",
@@ -258,32 +275,10 @@
   if (t && t->InSignalHandler())
     return id;
 
-  Origin o(id);
-  int depth = o.depth();
-  // 0 means unlimited depth.
-  if (flags()->origin_history_size > 0 && depth > 0) {
-    if (depth >= flags()->origin_history_size) {
-      return id;
-    } else {
-      ++depth;
-    }
-  }
-
-  StackDepotHandle h = StackDepotPut_WithHandle(*stack);
-  if (!h.valid()) return id;
-
-  if (flags()->origin_history_per_stack_limit > 0) {
-    int use_count = h.use_count();
-    if (use_count > flags()->origin_history_per_stack_limit) return id;
-  }
-
-  u32 chained_id;
-  bool inserted = ChainedOriginDepotPut(h.id(), o.id(), &chained_id);
-
-  if (inserted && flags()->origin_history_per_stack_limit > 0)
-    h.inc_use_count_unsafe();
-
-  return Origin(chained_id, depth).raw_id();
+  Origin o = Origin::FromRawId(id);
+  stack->tag = StackTrace::TAG_UNKNOWN;
+  Origin chained = Origin::CreateChainedOrigin(o, stack);
+  return chained.raw_id();
 }
 
 }  // namespace __msan
@@ -359,9 +354,7 @@
   SetDieCallback(MsanDie);
   InitTlsSize();
 
-  const char *msan_options = GetEnv("MSAN_OPTIONS");
-  InitializeFlags(&msan_flags, msan_options);
-  if (common_flags()->help) PrintFlagDescriptions();
+  InitializeFlags();
   __sanitizer_set_report_path(common_flags()->log_path);
 
   InitializeInterceptors();
@@ -378,13 +371,10 @@
     ReExec();
   }
 
-  VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
-
   __msan_clear_on_return();
   if (__msan_get_track_origins())
     VPrintf(1, "msan_track_origins\n");
-  if (!InitShadow(/* prot1 */ true, /* prot2 */ true,
-                  /* map_shadow */ true, __msan_get_track_origins())) {
+  if (!InitShadow(/* map_shadow */ true, __msan_get_track_origins())) {
     Printf("FATAL: MemorySanitizer can not mmap the shadow memory.\n");
     Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
     Printf("FATAL: Disabling ASLR is known to cause this error.\n");
@@ -396,6 +386,8 @@
 
   Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
 
+  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
   MsanTSDInit(MsanTSDDtor);
 
   MsanThread *main_thread = MsanThread::Create(0, 0);
@@ -504,24 +496,7 @@
 }
 
 void __msan_set_origin(const void *a, uptr size, u32 origin) {
-  // Origin mapping is 4 bytes per 4 bytes of application memory.
-  // Here we extend the range such that its left and right bounds are both
-  // 4 byte aligned.
-  if (!__msan_get_track_origins()) return;
-  uptr x = MEM_TO_ORIGIN((uptr)a);
-  uptr beg = x & ~3UL;  // align down.
-  uptr end = (x + size + 3) & ~3UL;  // align up.
-  u64 origin64 = ((u64)origin << 32) | origin;
-  // This is like memset, but the value is 32-bit. We unroll by 2 to write
-  // 64 bits at once. May want to unroll further to get 128-bit stores.
-  if (beg & 7ULL) {
-    *(u32*)beg = origin;
-    beg += 4;
-  }
-  for (uptr addr = beg; addr < (end & ~7UL); addr += 8)
-    *(u64*)addr = origin64;
-  if (end & 7ULL)
-    *(u32*)(end - 4) = origin;
+  if (__msan_get_track_origins()) SetOrigin(a, size, origin);
 }
 
 // 'descr' is created at compile time and contains '----' in the beginning.
@@ -543,14 +518,14 @@
     CHECK_LT(idx, kNumStackOriginDescrs);
     StackOriginDescr[idx] = descr + 4;
     StackOriginPC[idx] = pc;
-    ChainedOriginDepotPut(idx, Origin::kStackRoot, &id);
+    id = Origin::CreateStackOrigin(idx).raw_id();
     *id_ptr = id;
     if (print)
       Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
   }
   if (print)
     Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
-  __msan_set_origin(a, size, Origin(id, 1).raw_id());
+  __msan_set_origin(a, size, id);
 }
 
 u32 __msan_chain_origin(u32 id) {
@@ -568,6 +543,13 @@
   return *(u32*)origin_ptr;
 }
 
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id) {
+  Origin o = Origin::FromRawId(this_id);
+  while (o.raw_id() != prev_id && o.isChainedOrigin())
+    o = o.getNextChainedOrigin(nullptr);
+  return o.raw_id() == prev_id;
+}
+
 u32 __msan_get_umr_origin() {
   return __msan_origin_tls;
 }
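
The FlagHandlerKeepGoing shim above exists only to keep the legacy spelling working: keep_going=1 must end up as halt_on_error=false and vice versa, while a later halt_on_error=... can still override it. A minimal sketch of that inversion outside the sanitizer flag machinery (hypothetical ParseBool/ApplyOption helpers, not the FlagParser API) could look like:

    #include <cassert>
    #include <string>

    struct ToolFlags {
      bool halt_on_error = true;
    };

    static bool ParseBool(const std::string &value, bool *out) {
      if (value == "1" || value == "true")  { *out = true;  return true; }
      if (value == "0" || value == "false") { *out = false; return true; }
      return false;
    }

    // Apply a single "name=value" option; "keep_going" is accepted as a
    // deprecated alias whose value is the negation of halt_on_error.
    static bool ApplyOption(ToolFlags *f, const std::string &name,
                            const std::string &value) {
      bool b;
      if (!ParseBool(value, &b)) return false;
      if (name == "halt_on_error") { f->halt_on_error = b;  return true; }
      if (name == "keep_going")    { f->halt_on_error = !b; return true; }
      return false;
    }

    int main() {
      ToolFlags f;
      bool ok = ApplyOption(&f, "keep_going", "1");
      assert(ok && !f.halt_on_error);  // legacy spelling flips the flag
      ok = ApplyOption(&f, "halt_on_error", "1");
      assert(ok && f.halt_on_error);   // direct spelling applied later wins
      return ok ? 0 : 1;
    }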
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index aed8738..ed18f21 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -25,22 +25,91 @@
 # define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
 #endif
 
-#if defined(__mips64)
-#define MEM_TO_SHADOW(mem)       (((uptr)mem) & ~0x4000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)shadow) + 0x2000000000ULL)
-#define MEM_TO_ORIGIN(mem)       (SHADOW_TO_ORIGIN(MEM_TO_SHADOW(mem)))
-#define MEM_IS_APP(mem)          ((uptr)mem >= 0xe000000000ULL)
-#define MEM_IS_SHADOW(mem) \
-  ((uptr)mem >= 0xa000000000ULL && (uptr)mem <= 0xc000000000ULL)
-#elif defined(__x86_64__)
-#define MEM_TO_SHADOW(mem)       (((uptr)mem) & ~0x400000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)shadow) + 0x200000000000ULL)
-#define MEM_TO_ORIGIN(mem)       (SHADOW_TO_ORIGIN(MEM_TO_SHADOW(mem)))
-#define MEM_IS_APP(mem)          ((uptr)mem >= 0x600000000000ULL)
-#define MEM_IS_SHADOW(mem) \
-  ((uptr)mem >= 0x200000000000ULL && (uptr)mem <= 0x400000000000ULL)
+struct MappingDesc {
+  uptr start;
+  uptr end;
+  enum Type {
+    INVALID, APP, SHADOW, ORIGIN
+  } type;
+  const char *name;
+};
+
+
+#if SANITIZER_LINUX && defined(__mips64)
+
+// Everything is above 0x00e000000000.
+const MappingDesc kMemoryLayout[] = {
+    {0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
+    {0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
+    {0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
+
+#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
+
+// Low memory: main binary, MAP_32BIT mappings and modules
+// High memory: heap, modules and main thread stack
+const MappingDesc kMemoryLayout[] = {
+    {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"},
+    {0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"},
+    {0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"},
+    {0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
+
+// Maps low and high app ranges to contiguous space with zero base:
+//   Low:  0000 0000 0000 - 00ff ffff ffff  ->  2000 0000 0000 - 20ff ffff ffff
+//   High: 6000 0000 0000 - 7fff ffff ffff  ->  0000 0000 0000 - 1fff ffff ffff
+#define LINEARIZE_MEM(mem) \
+  (((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
+#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000)
+
+#elif SANITIZER_LINUX && SANITIZER_WORDSIZE == 64
+
+// Requires PIE binary and ASLR enabled.
+// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
+// Heap at 0x600000000000.
+const MappingDesc kMemoryLayout[] = {
+    {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
+    {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
+    {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
+#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
+
+#else
+#error "Unsupported platform"
 #endif
 
+const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
+
+#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
+
+#ifndef __clang__
+__attribute__((optimize("unroll-loops")))
+#endif
+inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
+// It is critical for performance that this loop is unrolled (because then it is
+// simplified into just a few constant comparisons).
+#ifdef __clang__
+#pragma unroll
+#endif
+  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+    if (kMemoryLayout[i].type == mapping_type &&
+        addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
+      return true;
+  return false;
+}
+
+#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
+#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
+#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
+
 // These constants must be kept in sync with the ones in MemorySanitizer.cc.
 const int kMsanParamTlsSize = 800;
 const int kMsanRetvalTlsSize = 800;
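
A quick way to sanity-check the Linux/x86_64 table and macros above is to push one concrete address through them. The constants below are copied from this hunk; the sample pointer is just an illustrative app address in the 0x600000000000-0x800000000000 range:

    // Worked example for the Linux/x86_64 layout above.
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kAppAddr = 0x7f0000001000ULL;              // app range
    constexpr uint64_t kShadow = kAppAddr & ~0x400000000000ULL;   // MEM_TO_SHADOW
    constexpr uint64_t kOrigin = kShadow + 0x200000000000ULL;     // SHADOW_TO_ORIGIN

    static_assert(kShadow == 0x3f0000001000ULL,
                  "lands in [0x2000.., 0x4000..), the shadow region");
    static_assert(kOrigin == 0x5f0000001000ULL,
                  "lands in [0x4000.., 0x6000..), the origin region");

    int main() {
      std::printf("app    %012llx\nshadow %012llx\norigin %012llx\n",
                  (unsigned long long)kAppAddr, (unsigned long long)kShadow,
                  (unsigned long long)kOrigin);
      return 0;
    }

MEM_IS_APP(0x7f0000001000) is true (the address is >= 0x600000000000), and the shadow and origin values fall in their respective table rows, which is exactly what addr_is_type() verifies once its loop is unrolled into constant comparisons.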
@@ -51,11 +120,12 @@
 extern int msan_report_count;
 
 bool ProtectRange(uptr beg, uptr end);
-bool InitShadow(bool prot1, bool prot2, bool map_shadow, bool init_origins);
+bool InitShadow(bool map_shadow, bool init_origins);
 char *GetProcSelfMaps();
 void InitializeInterceptors();
 
 void MsanAllocatorThreadFinish();
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
 void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
                      uptr alignment, bool zeroise);
 void MsanDeallocate(StackTrace *stack, void *ptr);
@@ -93,16 +163,12 @@
 void UnpoisonParam(uptr n);
 void UnpoisonThreadLocalState();
 
-u32 GetOriginIfPoisoned(uptr a, uptr size);
-void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
-void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack);
-void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack);
-void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack);
-
 // Returns a "chained" origin id, pointing to the given stack trace followed by
 // the previous origin id.
 u32 ChainOrigin(u32 id, StackTrace *stack);
 
+const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
+
 #define GET_MALLOC_STACK_TRACE                                                 \
   BufferedStackTrace stack;                                                    \
   if (__msan_get_track_origins() && msan_inited)                               \
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index aa1ea1d..698b6cd 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -14,12 +14,11 @@
 
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
 #include "msan.h"
 #include "msan_allocator.h"
-#include "msan_chained_origin_depot.h"
 #include "msan_origin.h"
 #include "msan_thread.h"
+#include "msan_poisoning.h"
 
 namespace __msan {
 
@@ -75,7 +74,7 @@
   if (inited) return;
   __msan_init();
   inited = true;  // this must happen before any threads are created.
-  allocator.Init();
+  allocator.Init(common_flags()->allocator_may_return_null);
 }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -94,7 +93,7 @@
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return AllocatorReturnNull();
+    return allocator.ReturnNullOrDie();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -114,11 +113,9 @@
   } else if (flags()->poison_in_malloc) {
     __msan_poison(allocated, size);
     if (__msan_get_track_origins()) {
-      u32 stack_id = StackDepotPut(*stack);
-      CHECK(stack_id);
-      u32 id;
-      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
-      __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
+      stack->tag = StackTrace::TAG_ALLOC;
+      Origin o = Origin::CreateHeapOrigin(stack);
+      __msan_set_origin(allocated, size, o.raw_id());
     }
   }
   MSAN_MALLOC_HOOK(allocated, size);
@@ -137,11 +134,9 @@
   if (flags()->poison_in_free) {
     __msan_poison(p, size);
     if (__msan_get_track_origins()) {
-      u32 stack_id = StackDepotPut(*stack);
-      CHECK(stack_id);
-      u32 id;
-      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
-      __msan_set_origin(p, size, Origin(id, 1).raw_id());
+      stack->tag = StackTrace::TAG_DEALLOC;
+      Origin o = Origin::CreateHeapOrigin(stack);
+      __msan_set_origin(p, size, o.raw_id());
     }
   }
   MsanThread *t = GetCurrentThread();
@@ -155,6 +150,13 @@
   }
 }
 
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+  Init();
+  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+    return allocator.ReturnNullOrDie();
+  return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
+}
+
 void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                      uptr alignment, bool zeroise) {
   if (!old_p)
@@ -169,15 +171,22 @@
   if (new_size <= actually_allocated_size) {
     // We are not reallocating here.
     meta->requested_size = new_size;
-    if (new_size > old_size)
-      __msan_poison((char*)old_p + old_size, new_size - old_size);
+    if (new_size > old_size) {
+      if (zeroise) {
+        __msan_clear_and_unpoison((char *)old_p + old_size,
+                                  new_size - old_size);
+      } else if (flags()->poison_in_malloc) {
+        stack->tag = StackTrace::TAG_ALLOC;
+        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
+      }
+    }
     return old_p;
   }
   uptr memcpy_size = Min(new_size, old_size);
   void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
   // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
   if (new_p) {
-    __msan_memcpy(new_p, old_p, memcpy_size);
+    CopyMemory(new_p, old_p, memcpy_size, stack);
     MsanDeallocate(stack, old_p);
   }
   return new_p;
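
MsanCalloc() above makes the nmemb*size overflow check explicit before delegating to MsanReallocate(). The guard it relies on boils down to the usual division test; a sketch with my own CallocOverflows helper (not the sanitizer's CallocShouldReturnNullDueToOverflow) is:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Returns true if nmemb * size would overflow size_t, i.e. the request
    // cannot be satisfied and calloc should fail (or die) instead of
    // silently allocating a truncated block.
    static bool CallocOverflows(size_t nmemb, size_t size) {
      if (nmemb == 0 || size == 0) return false;
      return nmemb > SIZE_MAX / size;
    }

    int main() {
      assert(!CallocOverflows(1024, 8));
      assert(CallocOverflows(SIZE_MAX / 2, 3));
      return 0;
    }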
diff --git a/lib/msan/msan_chained_origin_depot.cc b/lib/msan/msan_chained_origin_depot.cc
index f3fb3c8..c21e8e8 100644
--- a/lib/msan/msan_chained_origin_depot.cc
+++ b/lib/msan/msan_chained_origin_depot.cc
@@ -94,8 +94,7 @@
   typedef Handle handle_type;
 };
 
-// kTabSizeLog = 22 => 32Mb static storage for bucket pointers.
-static StackDepotBase<ChainedOriginDepotNode, 3, 20> chainedOriginDepot;
+static StackDepotBase<ChainedOriginDepotNode, 4, 20> chainedOriginDepot;
 
 StackDepotStats *ChainedOriginDepotGetStats() {
   return chainedOriginDepot.GetStats();
diff --git a/lib/msan/msan_flags.h b/lib/msan/msan_flags.h
index 9b93f11..4fc6d17 100644
--- a/lib/msan/msan_flags.h
+++ b/lib/msan/msan_flags.h
@@ -9,28 +9,18 @@
 //
 // This file is a part of MemorySanitizer.
 //
-// MemorySanitizer allocator.
 //===----------------------------------------------------------------------===//
 #ifndef MSAN_FLAGS_H
 #define MSAN_FLAGS_H
 
 namespace __msan {
 
-// Flags.
 struct Flags {
-  int exit_code;
-  int origin_history_size;
-  int origin_history_per_stack_limit;
-  bool poison_heap_with_zeroes;  // default: false
-  bool poison_stack_with_zeroes;  // default: false
-  bool poison_in_malloc;  // default: true
-  bool poison_in_free;  // default: true
-  bool report_umrs;
-  bool wrap_signals;
-  bool print_stats;
-  bool halt_on_error;
-  bool atexit;
-  int store_context_size; // like malloc_context_size, but for uninit stores
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+  void SetDefaults();
 };
 
 Flags *flags();
diff --git a/lib/msan/msan_flags.inc b/lib/msan/msan_flags.inc
new file mode 100644
index 0000000..cb58ffc
--- /dev/null
+++ b/lib/msan/msan_flags.inc
@@ -0,0 +1,33 @@
+//===-- msan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_FLAG
+# error "Define MSAN_FLAG prior to including this file!"
+#endif
+
+// MSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+MSAN_FLAG(int, exit_code, 77, "")
+MSAN_FLAG(int, origin_history_size, Origin::kMaxDepth, "")
+MSAN_FLAG(int, origin_history_per_stack_limit, 20000, "")
+MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_in_malloc, true, "")
+MSAN_FLAG(bool, poison_in_free, true, "")
+MSAN_FLAG(bool, report_umrs, true, "")
+MSAN_FLAG(bool, wrap_signals, true, "")
+MSAN_FLAG(bool, print_stats, false, "")
+MSAN_FLAG(bool, halt_on_error, !&__msan_keep_going, "")
+MSAN_FLAG(bool, atexit, false, "")
+MSAN_FLAG(int, store_context_size, 20,
+          "Like malloc_context_size, but for uninit stores.")
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index aa6b1ff..4a24394 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -20,6 +20,7 @@
 #include "msan_chained_origin_depot.h"
 #include "msan_origin.h"
 #include "msan_thread.h"
+#include "msan_poisoning.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -42,6 +43,10 @@
 using __sanitizer::atomic_store;
 using __sanitizer::atomic_uintptr_t;
 
+#if SANITIZER_FREEBSD
+#define __errno_location __error
+#endif
+
 // True if this is a nested interceptor.
 static THREADLOCAL int in_interceptor_scope;
 
@@ -97,6 +102,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb,
             void *file) {
   ENSURE_MSAN_INITED();
@@ -105,6 +111,10 @@
     __msan_unpoison(ptr, res *size);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED INTERCEPT_FUNCTION(fread_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED
+#endif
 
 INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
   ENSURE_MSAN_INITED();
@@ -154,12 +164,17 @@
   return 0;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
   CHECK_EQ(boundary & (boundary - 1), 0);
   void *ptr = MsanReallocate(&stack, 0, size, boundary, false);
   return ptr;
 }
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
 
 INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
@@ -182,6 +197,7 @@
   return ptr;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(void *, pvalloc, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
   uptr PageSize = GetPageSizeCached();
@@ -193,6 +209,10 @@
   void *ptr = MsanReallocate(&stack, 0, size, PageSize, false);
   return ptr;
 }
+#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define MSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
 
 INTERCEPTOR(void, free, void *ptr) {
   GET_MALLOC_STACK_TRACE;
@@ -200,16 +220,22 @@
   MsanDeallocate(&stack, ptr);
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(void, cfree, void *ptr) {
   GET_MALLOC_STACK_TRACE;
   if (ptr == 0) return;
   MsanDeallocate(&stack, ptr);
 }
+#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define MSAN_MAYBE_INTERCEPT_CFREE
+#endif
 
 INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
   return __sanitizer_get_allocated_size(ptr);
 }
 
+#if !SANITIZER_FREEBSD
 // This function actually returns a struct by value, but we can't unpoison a
 // temporary! The following is equivalent on all supported platforms, and we
 // have a test to confirm that.
@@ -217,16 +243,32 @@
   REAL(memset)(sret, 0, sizeof(*sret));
   __msan_unpoison(sret, sizeof(*sret));
 }
+#define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLINFO
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, mallopt, int cmd, int value) {
   return -1;
 }
+#define MSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOPT
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(void, malloc_stats, void) {
   // FIXME: implement, but don't call REAL(malloc_stats)!
 }
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS
+#endif
 
 INTERCEPTOR(SIZE_T, strlen, const char *s) {
+  if (msan_init_is_running)
+    return REAL(strlen)(s);
   ENSURE_MSAN_INITED();
   SIZE_T res = REAL(strlen)(s);
   CHECK_UNPOISONED(s, res + 1);
@@ -249,7 +291,7 @@
   GET_STORE_STACK_TRACE;
   SIZE_T n = REAL(strlen)(src);
   char *res = REAL(strcpy)(dest, src);  // NOLINT
-  CopyPoison(dest, src, n + 1, &stack);
+  CopyShadowAndOrigin(dest, src, n + 1, &stack);
   return res;
 }
 
@@ -260,7 +302,7 @@
   if (copy_size < n)
     copy_size++;  // trailing \0
   char *res = REAL(strncpy)(dest, src, n);  // NOLINT
-  CopyPoison(dest, src, copy_size, &stack);
+  CopyShadowAndOrigin(dest, src, copy_size, &stack);
   __msan_unpoison(dest + copy_size, n - copy_size);
   return res;
 }
@@ -270,47 +312,61 @@
   GET_STORE_STACK_TRACE;
   SIZE_T n = REAL(strlen)(src);
   char *res = REAL(stpcpy)(dest, src);  // NOLINT
-  CopyPoison(dest, src, n + 1, &stack);
+  CopyShadowAndOrigin(dest, src, n + 1, &stack);
   return res;
 }
 
 INTERCEPTOR(char *, strdup, char *src) {
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
+  // On FreeBSD strdup() leverages strlen().
+  InterceptorScope interceptor_scope;
   SIZE_T n = REAL(strlen)(src);
   char *res = REAL(strdup)(src);
-  CopyPoison(res, src, n + 1, &stack);
+  CopyShadowAndOrigin(res, src, n + 1, &stack);
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(char *, __strdup, char *src) {
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   SIZE_T n = REAL(strlen)(src);
   char *res = REAL(__strdup)(src);
-  CopyPoison(res, src, n + 1, &stack);
+  CopyShadowAndOrigin(res, src, n + 1, &stack);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRDUP
+#endif
 
 INTERCEPTOR(char *, strndup, char *src, SIZE_T n) {
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
+  // On FreeBSD strndup() leverages strnlen().
+  InterceptorScope interceptor_scope;
   SIZE_T copy_size = REAL(strnlen)(src, n);
   char *res = REAL(strndup)(src, n);
-  CopyPoison(res, src, copy_size, &stack);
+  CopyShadowAndOrigin(res, src, copy_size, &stack);
   __msan_unpoison(res + copy_size, 1); // \0
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(char *, __strndup, char *src, SIZE_T n) {
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   SIZE_T copy_size = REAL(strnlen)(src, n);
   char *res = REAL(__strndup)(src, n);
-  CopyPoison(res, src, copy_size, &stack);
+  CopyShadowAndOrigin(res, src, copy_size, &stack);
   __msan_unpoison(res + copy_size, 1); // \0
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___STRNDUP INTERCEPT_FUNCTION(__strndup)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRNDUP
+#endif
 
 INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
   ENSURE_MSAN_INITED();
@@ -326,7 +382,7 @@
   SIZE_T src_size = REAL(strlen)(src);
   SIZE_T dest_size = REAL(strlen)(dest);
   char *res = REAL(strcat)(dest, src);  // NOLINT
-  CopyPoison(dest + dest_size, src, src_size + 1, &stack);
+  CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack);
   return res;
 }
 
@@ -336,7 +392,7 @@
   SIZE_T dest_size = REAL(strlen)(dest);
   SIZE_T copy_size = REAL(strnlen)(src, n);
   char *res = REAL(strncat)(dest, src, n);  // NOLINT
-  CopyPoison(dest + dest_size, src, copy_size, &stack);
+  CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack);
   __msan_unpoison(dest + dest_size + copy_size, 1); // \0
   return res;
 }
@@ -349,55 +405,63 @@
   __msan_unpoison(endptr, sizeof(*endptr));         \
   return res;
 
-#define INTERCEPTOR_STRTO(ret_type, func)                        \
-  INTERCEPTOR(ret_type, func, const char *nptr, char **endptr) { \
-    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr);        \
+#define INTERCEPTOR_STRTO(ret_type, func, char_type)                       \
+  INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \
+    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr);                  \
   }
 
-#define INTERCEPTOR_STRTO_BASE(ret_type, func)                             \
-  INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
-    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base);            \
+#define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type)                \
+  INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+              int base) {                                                \
+    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base);          \
   }
 
-#define INTERCEPTOR_STRTO_LOC(ret_type, func)                               \
-  INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, void *loc) { \
-    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc);              \
-  }
-
-#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func)                       \
-  INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base, \
+#define INTERCEPTOR_STRTO_LOC(ret_type, func, char_type)                 \
+  INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
               void *loc) {                                               \
+    INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc);           \
+  }
+
+#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type)            \
+  INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+              int base, void *loc) {                                     \
     INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base, loc);     \
   }
 
-INTERCEPTOR_STRTO(double, strtod)                                    // NOLINT
-INTERCEPTOR_STRTO(float, strtof)                                     // NOLINT
-INTERCEPTOR_STRTO(long double, strtold)                              // NOLINT
-INTERCEPTOR_STRTO_BASE(long, strtol)                                 // NOLINT
-INTERCEPTOR_STRTO_BASE(long long, strtoll)                           // NOLINT
-INTERCEPTOR_STRTO_BASE(unsigned long, strtoul)                       // NOLINT
-INTERCEPTOR_STRTO_BASE(unsigned long long, strtoull)                 // NOLINT
-INTERCEPTOR_STRTO_LOC(double, strtod_l)                              // NOLINT
-INTERCEPTOR_STRTO_LOC(double, __strtod_l)                            // NOLINT
-INTERCEPTOR_STRTO_LOC(double, __strtod_internal)                     // NOLINT
-INTERCEPTOR_STRTO_LOC(float, strtof_l)                               // NOLINT
-INTERCEPTOR_STRTO_LOC(float, __strtof_l)                             // NOLINT
-INTERCEPTOR_STRTO_LOC(float, __strtof_internal)                      // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, strtold_l)                        // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, __strtold_l)                      // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, __strtold_internal)               // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, strtol_l)                           // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, __strtol_l)                         // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, __strtol_internal)                  // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, strtoll_l)                     // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, __strtoll_l)                   // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, __strtoll_internal)            // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, strtoul_l)                 // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, __strtoul_l)               // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, __strtoul_internal)        // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, strtoull_l)           // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, __strtoull_l)         // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, __strtoull_internal)  // NOLINT
+#define INTERCEPTORS_STRTO(ret_type, func, char_type)      \
+  INTERCEPTOR_STRTO(ret_type, func, char_type)             \
+  INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type)     \
+  INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \
+  INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type)
+
+#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type)      \
+  INTERCEPTOR_STRTO_BASE(ret_type, func, char_type)             \
+  INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type)     \
+  INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_l, char_type) \
+  INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_internal, char_type)
+
+INTERCEPTORS_STRTO(double, strtod, char)                     // NOLINT
+INTERCEPTORS_STRTO(float, strtof, char)                      // NOLINT
+INTERCEPTORS_STRTO(long double, strtold, char)               // NOLINT
+INTERCEPTORS_STRTO_BASE(long, strtol, char)                  // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, strtoll, char)            // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char)        // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, strtoull, char)  // NOLINT
+
+INTERCEPTORS_STRTO(double, wcstod, wchar_t)                     // NOLINT
+INTERCEPTORS_STRTO(float, wcstof, wchar_t)                      // NOLINT
+INTERCEPTORS_STRTO(long double, wcstold, wchar_t)               // NOLINT
+INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t)                  // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t)            // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t)        // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)  // NOLINT
+
+#define INTERCEPT_STRTO(func) \
+  INTERCEPT_FUNCTION(func); \
+  INTERCEPT_FUNCTION(func##_l); \
+  INTERCEPT_FUNCTION(__##func##_l); \
+  INTERCEPT_FUNCTION(__##func##_internal);
+
 
 // FIXME: support *wprintf in common format interceptors.
 INTERCEPTOR(int, vswprintf, void *str, uptr size, void *format, va_list ap) {
@@ -451,11 +515,16 @@
   INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime_l, s, max, format, tm, loc);
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(SIZE_T, __strftime_l, char *s, SIZE_T max, const char *format,
             __sanitizer_tm *tm, void *loc) {
   INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, __strftime_l, s, max, format, tm,
                             loc);
 }
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L INTERCEPT_FUNCTION(__strftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L
+#endif
 
 INTERCEPTOR(SIZE_T, wcsftime, wchar_t *s, SIZE_T max, const wchar_t *format,
             __sanitizer_tm *tm) {
@@ -468,11 +537,16 @@
                             loc);
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(SIZE_T, __wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format,
             __sanitizer_tm *tm, void *loc) {
   INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, __wcsftime_l, s, max, format, tm,
                             loc);
 }
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L INTERCEPT_FUNCTION(__wcsftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L
+#endif
 
 INTERCEPTOR(int, mbtowc, wchar_t *dest, const char *src, SIZE_T n) {
   ENSURE_MSAN_INITED();
@@ -507,7 +581,8 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   wchar_t *res = REAL(wcscpy)(dest, src);
-  CopyPoison(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), &stack);
+  CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1),
+                      &stack);
   return res;
 }
 
@@ -516,7 +591,7 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   wchar_t *res = REAL(wmemcpy)(dest, src, n);
-  CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
+  CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
   return res;
 }
 
@@ -524,14 +599,14 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   wchar_t *res = REAL(wmempcpy)(dest, src, n);
-  CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
+  CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
   return res;
 }
 
 INTERCEPTOR(wchar_t *, wmemset, wchar_t *s, wchar_t c, SIZE_T n) {
   CHECK(MEM_IS_APP(s));
   ENSURE_MSAN_INITED();
-  wchar_t *res = (wchar_t *)REAL(memset)(s, c, n * sizeof(wchar_t));
+  wchar_t *res = REAL(wmemset)(s, c, n);
   __msan_unpoison(s, n * sizeof(wchar_t));
   return res;
 }
@@ -540,7 +615,7 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   wchar_t *res = REAL(wmemmove)(dest, src, n);
-  MovePoison(dest, src, n * sizeof(wchar_t), &stack);
+  MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
   return res;
 }
 
@@ -550,13 +625,6 @@
   return res;
 }
 
-INTERCEPTOR(double, wcstod, const wchar_t *nptr, wchar_t **endptr) {
-  ENSURE_MSAN_INITED();
-  double res = REAL(wcstod)(nptr, endptr);
-  __msan_unpoison(endptr, sizeof(*endptr));
-  return res;
-}
-
 INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
   ENSURE_MSAN_INITED();
   int res = REAL(gettimeofday)(tv, tz);
@@ -577,6 +645,8 @@
 }
 
 INTERCEPTOR(char *, getenv, char *name) {
+  if (msan_init_is_running)
+    return REAL(getenv)(name);
   ENSURE_MSAN_INITED();
   char *res = REAL(getenv)(name);
   if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
@@ -609,6 +679,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__fxstat)(magic, fd, buf);
@@ -616,7 +687,12 @@
     __msan_unpoison(buf, __sanitizer::struct_stat_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__fxstat64)(magic, fd, buf);
@@ -624,7 +700,20 @@
     __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
 
+#if SANITIZER_FREEBSD
+INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) {
+  ENSURE_MSAN_INITED();
+  int res = REAL(fstatat)(fd, pathname, buf, flags);
+  if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+  return res;
+}
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat)
+#else
 INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
             int flags) {
   ENSURE_MSAN_INITED();
@@ -632,7 +721,10 @@
   if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
   return res;
 }
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat)
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
             int flags) {
   ENSURE_MSAN_INITED();
@@ -640,7 +732,21 @@
   if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64
+#endif
 
+#if SANITIZER_FREEBSD
+INTERCEPTOR(int, stat, char *path, void *buf) {
+  ENSURE_MSAN_INITED();
+  int res = REAL(stat)(path, buf);
+  if (!res)
+    __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+  return res;
+}
+# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(stat)
+#else
 INTERCEPTOR(int, __xstat, int magic, char *path, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__xstat)(magic, path, buf);
@@ -648,7 +754,10 @@
     __msan_unpoison(buf, __sanitizer::struct_stat_sz);
   return res;
 }
+# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(__xstat)
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __xstat64, int magic, char *path, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__xstat64)(magic, path, buf);
@@ -656,7 +765,12 @@
     __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___XSTAT64 INTERCEPT_FUNCTION(__xstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __lxstat, int magic, char *path, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__lxstat)(magic, path, buf);
@@ -664,7 +778,12 @@
     __msan_unpoison(buf, __sanitizer::struct_stat_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___LXSTAT INTERCEPT_FUNCTION(__lxstat)
+#else
+#define MSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, __lxstat64, int magic, char *path, void *buf) {
   ENSURE_MSAN_INITED();
   int res = REAL(__lxstat64)(magic, path, buf);
@@ -672,6 +791,10 @@
     __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT___LXSTAT64 INTERCEPT_FUNCTION(__lxstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
 
 INTERCEPTOR(int, pipe, int pipefd[2]) {
   if (msan_init_is_running)
@@ -707,6 +830,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) {
   ENSURE_MSAN_INITED();
   char *res = REAL(fgets_unlocked)(s, size, stream);
@@ -714,6 +838,10 @@
     __msan_unpoison(s, REAL(strlen)(s) + 1);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED
+#endif
 
 INTERCEPTOR(int, getrlimit, int resource, void *rlim) {
   if (msan_init_is_running)
@@ -725,6 +853,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, getrlimit64, int resource, void *rlim) {
   if (msan_init_is_running)
     return REAL(getrlimit64)(resource, rlim);
@@ -734,15 +863,34 @@
     __msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz);
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 INTERCEPT_FUNCTION(getrlimit64)
+#else
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64
+#endif
 
-INTERCEPTOR(int, uname, void *utsname) {
+#if SANITIZER_FREEBSD
+// FreeBSD's <sys/utsname.h> defines uname() as
+// static __inline int uname(struct utsname *name) {
+//   return __xuname(SYS_NMLN, (void*)name);
+// }
+INTERCEPTOR(int, __xuname, int size, void *utsname) {
   ENSURE_MSAN_INITED();
-  int res = REAL(uname)(utsname);
-  if (!res) {
+  int res = REAL(__xuname)(size, utsname);
+  if (!res)
     __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
-  }
   return res;
 }
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname)
+#else
+INTERCEPTOR(int, uname, struct utsname *utsname) {
+  ENSURE_MSAN_INITED();
+  int res = REAL(uname)(utsname);
+  if (!res)
+    __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
+  return res;
+}
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname)
+#endif
 
 INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
   ENSURE_MSAN_INITED();
@@ -756,6 +904,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents,
     int timeout) {
   ENSURE_MSAN_INITED();
@@ -765,7 +914,12 @@
   }
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT INTERCEPT_FUNCTION(epoll_wait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
     int timeout, void *sigmask) {
   ENSURE_MSAN_INITED();
@@ -775,6 +929,10 @@
   }
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT INTERCEPT_FUNCTION(epoll_pwait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT
+#endif
 
 INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
   ENSURE_MSAN_INITED();
@@ -794,17 +952,15 @@
     __msan_unpoison(buf, res);
     if (srcaddr) {
       SIZE_T sz = *addrlen;
-      __msan_unpoison(srcaddr, (sz < srcaddr_sz) ? sz : srcaddr_sz);
+      __msan_unpoison(srcaddr, Min(sz, srcaddr_sz));
     }
   }
   return res;
 }
 
 INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
-  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return AllocatorReturnNull();
   GET_MALLOC_STACK_TRACE;
-  if (!msan_inited) {
+  if (UNLIKELY(!msan_inited)) {
     // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
     const SIZE_T kCallocPoolSize = 1024;
     static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -815,7 +971,7 @@
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  return MsanReallocate(&stack, 0, nmemb * size, sizeof(u64), true);
+  return MsanCalloc(&stack, nmemb, size);
 }
 
 INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@@ -828,20 +984,18 @@
   return MsanReallocate(&stack, 0, size, sizeof(u64), false);
 }
 
-void __msan_allocated_memory(const void* data, uptr size) {
+void __msan_allocated_memory(const void *data, uptr size) {
   GET_MALLOC_STACK_TRACE;
-  if (flags()->poison_in_malloc)
-    __msan_poison(data, size);
-  if (__msan_get_track_origins()) {
-    u32 stack_id = StackDepotPut(stack);
-    u32 id;
-    ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
-    __msan_set_origin(data, size, Origin(id, 1).raw_id());
+  if (flags()->poison_in_malloc) {
+    stack.tag = STACK_TRACE_TAG_POISON;
+    PoisonMemory(data, size, &stack);
   }
 }
 
 INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
             int fd, OFF_T offset) {
+  if (msan_init_is_running)
+    return REAL(mmap)(addr, length, prot, flags, fd, offset);
   ENSURE_MSAN_INITED();
   if (addr && !MEM_IS_APP(addr)) {
     if (flags & map_fixed) {
@@ -857,6 +1011,7 @@
   return res;
 }
 
+#if !SANITIZER_FREEBSD
 INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
             int fd, OFF64_T offset) {
   ENSURE_MSAN_INITED();
@@ -873,6 +1028,10 @@
     __msan_unpoison(res, RoundUpTo(length, GetPageSize()));
   return res;
 }
+#define MSAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
+#else
+#define MSAN_MAYBE_INTERCEPT_MMAP64
+#endif
 
 struct dlinfo {
   char *dli_fname;
@@ -1199,6 +1358,9 @@
   InterceptorScope interceptor_scope;                             \
   __msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */  \
   ENSURE_MSAN_INITED();
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+  do {                                            \
+  } while (false)
 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
   do {                                         \
   } while (false)
@@ -1216,8 +1378,11 @@
   } while (false)  // FIXME
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) \
-  if (map) ForEachMappedRegion((link_map *)map, __msan_unpoison);
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)  \
+  do {                                                       \
+    link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \
+    if (map) ForEachMappedRegion(map, __msan_unpoison);      \
+  } while (false)
 
 #include "sanitizer_common/sanitizer_common_interceptors.inc"
 
@@ -1231,53 +1396,26 @@
 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s)
 #include "sanitizer_common/sanitizer_common_syscalls.inc"
 
-static void PoisonShadow(uptr ptr, uptr size, u8 value) {
-  uptr PageSize = GetPageSizeCached();
-  uptr shadow_beg = MEM_TO_SHADOW(ptr);
-  uptr shadow_end = MEM_TO_SHADOW(ptr + size);
-  if (value ||
-      shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
-    REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
-  } else {
-    uptr page_beg = RoundUpTo(shadow_beg, PageSize);
-    uptr page_end = RoundDownTo(shadow_end, PageSize);
-
-    if (page_beg >= page_end) {
-      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
-    } else {
-      if (page_beg != shadow_beg) {
-        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
-      }
-      if (page_end != shadow_end) {
-        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
-      }
-      MmapFixedNoReserve(page_beg, page_end - page_beg);
-    }
-  }
-}
-
 // These interface functions reside here so that they can use
 // REAL(memset), etc.
 void __msan_unpoison(const void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  PoisonShadow((uptr)a, size, 0);
+  SetShadow(a, size, 0);
 }
 
 void __msan_poison(const void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  PoisonShadow((uptr)a, size,
-               __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
+  SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
 }
 
 void __msan_poison_stack(void *a, uptr size) {
   if (!MEM_IS_APP(a)) return;
-  PoisonShadow((uptr)a, size,
-               __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
+  SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
 }
 
 void __msan_clear_and_unpoison(void *a, uptr size) {
   REAL(memset)(a, 0, size);
-  PoisonShadow((uptr)a, size, 0);
+  SetShadow(a, size, 0);
 }
 
 void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
@@ -1286,7 +1424,7 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   void *res = REAL(memcpy)(dest, src, n);
-  CopyPoison(dest, src, n, &stack);
+  CopyShadowAndOrigin(dest, src, n, &stack);
   return res;
 }
 
@@ -1305,7 +1443,7 @@
   ENSURE_MSAN_INITED();
   GET_STORE_STACK_TRACE;
   void *res = REAL(memmove)(dest, src, n);
-  MovePoison(dest, src, n, &stack);
+  MoveShadowAndOrigin(dest, src, n, &stack);
   return res;
 }
 
@@ -1316,119 +1454,29 @@
 
 namespace __msan {
 
-u32 GetOriginIfPoisoned(uptr addr, uptr size) {
-  unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
-  for (uptr i = 0; i < size; ++i)
-    if (s[i])
-      return *(u32 *)SHADOW_TO_ORIGIN((s + i) & ~3UL);
-  return 0;
-}
-
-void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
-                         u32 src_origin) {
-  uptr dst_s = MEM_TO_SHADOW(addr);
-  uptr src_s = src_shadow;
-  uptr src_s_end = src_s + size;
-
-  for (; src_s < src_s_end; ++dst_s, ++src_s)
-    if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s &~3UL) = src_origin;
-}
-
-void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack) {
-  if (!__msan_get_track_origins()) return;
-  if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
-
-  uptr d = (uptr)dst;
-  uptr beg = d & ~3UL;
-  // Copy left unaligned origin if that memory is poisoned.
-  if (beg < d) {
-    u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
-    if (o) {
-      if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
-      *(u32 *)MEM_TO_ORIGIN(beg) = o;
-    }
-    beg += 4;
-  }
-
-  uptr end = (d + size) & ~3UL;
-  // If both ends fall into the same 4-byte slot, we are done.
-  if (end < beg) return;
-
-  // Copy right unaligned origin if that memory is poisoned.
-  if (end < d + size) {
-    u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
-    if (o) {
-      if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
-      *(u32 *)MEM_TO_ORIGIN(end) = o;
-    }
-  }
-
-  if (beg < end) {
-    // Align src up.
-    uptr s = ((uptr)src + 3) & ~3UL;
-    // FIXME: factor out to msan_copy_origin_aligned
-    if (__msan_get_track_origins() > 1) {
-      u32 *src = (u32 *)MEM_TO_ORIGIN(s);
-      u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
-      u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
-      u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
-      u32 src_o = 0;
-      u32 dst_o = 0;
-      for (; src < src_end; ++src, ++src_s, ++dst) {
-        if (!*src_s) continue;
-        if (*src != src_o) {
-          src_o = *src;
-          dst_o = ChainOrigin(src_o, stack);
-        }
-        *dst = dst_o;
-      }
-    } else {
-      REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
-                  end - beg);
-    }
-  }
-}
-
-void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack) {
-  if (!MEM_IS_APP(dst)) return;
-  if (!MEM_IS_APP(src)) return;
-  if (src == dst) return;
-  REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
-                (void *)MEM_TO_SHADOW((uptr)src), size);
-  CopyOrigin(dst, src, size, stack);
-}
-
-void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack) {
-  if (!MEM_IS_APP(dst)) return;
-  if (!MEM_IS_APP(src)) return;
-  REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
-              (void *)MEM_TO_SHADOW((uptr)src), size);
-  CopyOrigin(dst, src, size, stack);
-}
-
 void InitializeInterceptors() {
   static int inited = 0;
   CHECK_EQ(inited, 0);
   InitializeCommonInterceptors();
 
   INTERCEPT_FUNCTION(mmap);
-  INTERCEPT_FUNCTION(mmap64);
+  MSAN_MAYBE_INTERCEPT_MMAP64;
   INTERCEPT_FUNCTION(posix_memalign);
-  INTERCEPT_FUNCTION(memalign);
+  MSAN_MAYBE_INTERCEPT_MEMALIGN;
   INTERCEPT_FUNCTION(__libc_memalign);
   INTERCEPT_FUNCTION(valloc);
-  INTERCEPT_FUNCTION(pvalloc);
+  MSAN_MAYBE_INTERCEPT_PVALLOC;
   INTERCEPT_FUNCTION(malloc);
   INTERCEPT_FUNCTION(calloc);
   INTERCEPT_FUNCTION(realloc);
   INTERCEPT_FUNCTION(free);
-  INTERCEPT_FUNCTION(cfree);
+  MSAN_MAYBE_INTERCEPT_CFREE;
   INTERCEPT_FUNCTION(malloc_usable_size);
-  INTERCEPT_FUNCTION(mallinfo);
-  INTERCEPT_FUNCTION(mallopt);
-  INTERCEPT_FUNCTION(malloc_stats);
+  MSAN_MAYBE_INTERCEPT_MALLINFO;
+  MSAN_MAYBE_INTERCEPT_MALLOPT;
+  MSAN_MAYBE_INTERCEPT_MALLOC_STATS;
   INTERCEPT_FUNCTION(fread);
-  INTERCEPT_FUNCTION(fread_unlocked);
+  MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
   INTERCEPT_FUNCTION(readlink);
   INTERCEPT_FUNCTION(memcpy);
   INTERCEPT_FUNCTION(memccpy);
@@ -1443,84 +1491,69 @@
   INTERCEPT_FUNCTION(strcpy);  // NOLINT
   INTERCEPT_FUNCTION(stpcpy);  // NOLINT
   INTERCEPT_FUNCTION(strdup);
-  INTERCEPT_FUNCTION(__strdup);
+  MSAN_MAYBE_INTERCEPT___STRDUP;
   INTERCEPT_FUNCTION(strndup);
-  INTERCEPT_FUNCTION(__strndup);
+  MSAN_MAYBE_INTERCEPT___STRNDUP;
   INTERCEPT_FUNCTION(strncpy);  // NOLINT
   INTERCEPT_FUNCTION(strlen);
   INTERCEPT_FUNCTION(strnlen);
   INTERCEPT_FUNCTION(gcvt);
   INTERCEPT_FUNCTION(strcat);  // NOLINT
   INTERCEPT_FUNCTION(strncat);  // NOLINT
-  INTERCEPT_FUNCTION(strtod);
-  INTERCEPT_FUNCTION(strtof);
-  INTERCEPT_FUNCTION(strtold);
-  INTERCEPT_FUNCTION(strtol);
-  INTERCEPT_FUNCTION(strtoll);
-  INTERCEPT_FUNCTION(strtoul);
-  INTERCEPT_FUNCTION(strtoull);
-  INTERCEPT_FUNCTION(strtod_l);
-  INTERCEPT_FUNCTION(__strtod_l);
-  INTERCEPT_FUNCTION(__strtod_internal);
-  INTERCEPT_FUNCTION(strtof_l);
-  INTERCEPT_FUNCTION(__strtof_l);
-  INTERCEPT_FUNCTION(__strtof_internal);
-  INTERCEPT_FUNCTION(strtold_l);
-  INTERCEPT_FUNCTION(__strtold_l);
-  INTERCEPT_FUNCTION(__strtold_internal);
-  INTERCEPT_FUNCTION(strtol_l);
-  INTERCEPT_FUNCTION(__strtol_l);
-  INTERCEPT_FUNCTION(__strtol_internal);
-  INTERCEPT_FUNCTION(strtoll_l);
-  INTERCEPT_FUNCTION(__strtoll_l);
-  INTERCEPT_FUNCTION(__strtoll_internal);
-  INTERCEPT_FUNCTION(strtoul_l);
-  INTERCEPT_FUNCTION(__strtoul_l);
-  INTERCEPT_FUNCTION(__strtoul_internal);
-  INTERCEPT_FUNCTION(strtoull_l);
-  INTERCEPT_FUNCTION(__strtoull_l);
-  INTERCEPT_FUNCTION(__strtoull_internal);
+  INTERCEPT_STRTO(strtod);
+  INTERCEPT_STRTO(strtof);
+  INTERCEPT_STRTO(strtold);
+  INTERCEPT_STRTO(strtol);
+  INTERCEPT_STRTO(strtoul);
+  INTERCEPT_STRTO(strtoll);
+  INTERCEPT_STRTO(strtoull);
+  INTERCEPT_STRTO(wcstod);
+  INTERCEPT_STRTO(wcstof);
+  INTERCEPT_STRTO(wcstold);
+  INTERCEPT_STRTO(wcstol);
+  INTERCEPT_STRTO(wcstoul);
+  INTERCEPT_STRTO(wcstoll);
+  INTERCEPT_STRTO(wcstoull);
   INTERCEPT_FUNCTION(vswprintf);
   INTERCEPT_FUNCTION(swprintf);
   INTERCEPT_FUNCTION(strxfrm);
   INTERCEPT_FUNCTION(strxfrm_l);
   INTERCEPT_FUNCTION(strftime);
   INTERCEPT_FUNCTION(strftime_l);
-  INTERCEPT_FUNCTION(__strftime_l);
+  MSAN_MAYBE_INTERCEPT___STRFTIME_L;
   INTERCEPT_FUNCTION(wcsftime);
   INTERCEPT_FUNCTION(wcsftime_l);
-  INTERCEPT_FUNCTION(__wcsftime_l);
+  MSAN_MAYBE_INTERCEPT___WCSFTIME_L;
   INTERCEPT_FUNCTION(mbtowc);
   INTERCEPT_FUNCTION(mbrtowc);
   INTERCEPT_FUNCTION(wcslen);
   INTERCEPT_FUNCTION(wcschr);
   INTERCEPT_FUNCTION(wcscpy);
   INTERCEPT_FUNCTION(wcscmp);
-  INTERCEPT_FUNCTION(wcstod);
   INTERCEPT_FUNCTION(getenv);
   INTERCEPT_FUNCTION(setenv);
   INTERCEPT_FUNCTION(putenv);
   INTERCEPT_FUNCTION(gettimeofday);
   INTERCEPT_FUNCTION(fcvt);
-  INTERCEPT_FUNCTION(__fxstat);
-  INTERCEPT_FUNCTION(__fxstatat);
-  INTERCEPT_FUNCTION(__xstat);
-  INTERCEPT_FUNCTION(__lxstat);
-  INTERCEPT_FUNCTION(__fxstat64);
-  INTERCEPT_FUNCTION(__fxstatat64);
-  INTERCEPT_FUNCTION(__xstat64);
-  INTERCEPT_FUNCTION(__lxstat64);
+  MSAN_MAYBE_INTERCEPT___FXSTAT;
+  MSAN_INTERCEPT_FSTATAT;
+  MSAN_INTERCEPT_STAT;
+  MSAN_MAYBE_INTERCEPT___LXSTAT;
+  MSAN_MAYBE_INTERCEPT___FXSTAT64;
+  MSAN_MAYBE_INTERCEPT___FXSTATAT64;
+  MSAN_MAYBE_INTERCEPT___XSTAT64;
+  MSAN_MAYBE_INTERCEPT___LXSTAT64;
   INTERCEPT_FUNCTION(pipe);
   INTERCEPT_FUNCTION(pipe2);
   INTERCEPT_FUNCTION(socketpair);
   INTERCEPT_FUNCTION(fgets);
-  INTERCEPT_FUNCTION(fgets_unlocked);
+  MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED;
   INTERCEPT_FUNCTION(getrlimit);
-  INTERCEPT_FUNCTION(getrlimit64);
-  INTERCEPT_FUNCTION(uname);
+  MSAN_MAYBE_INTERCEPT_GETRLIMIT64;
+  MSAN_INTERCEPT_UNAME;
   INTERCEPT_FUNCTION(gethostname);
-  INTERCEPT_FUNCTION(epoll_wait);
-  INTERCEPT_FUNCTION(epoll_pwait);
+  MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
+  MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
   INTERCEPT_FUNCTION(recv);
   INTERCEPT_FUNCTION(recvfrom);
   INTERCEPT_FUNCTION(dladdr);
diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h
index 8641f81..f4d37d9 100644
--- a/lib/msan/msan_interface_internal.h
+++ b/lib/msan/msan_interface_internal.h
@@ -96,6 +96,13 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 u32 __msan_get_origin(const void *a);
 
+// Test that this_id is a descendant of prev_id (or they are simply equal).
+// "descendant" here means that are part of the same chain, created with
+// __msan_chain_origin.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id);
+
+
 SANITIZER_INTERFACE_ATTRIBUTE
 void __msan_clear_on_return();
 
diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc
index 2a970c0..6c18516 100644
--- a/lib/msan/msan_linux.cc
+++ b/lib/msan/msan_linux.cc
@@ -9,11 +9,11 @@
 //
 // This file is a part of MemorySanitizer.
 //
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
 
 #include "msan.h"
 #include "msan_thread.h"
@@ -35,64 +35,107 @@
 
 namespace __msan {
 
-#if defined(__mips64)
-static const uptr kMemBeg     = 0xe000000000;
-static const uptr kMemEnd     = 0xffffffffff;
-#elif defined(__x86_64__)
-static const uptr kMemBeg     = 0x600000000000;
-static const uptr kMemEnd     = 0x7fffffffffff;
-#endif
-
-static const uptr kShadowBeg  = MEM_TO_SHADOW(kMemBeg);
-static const uptr kShadowEnd  = MEM_TO_SHADOW(kMemEnd);
-static const uptr kBad1Beg    = 0;
-static const uptr kBad1End    = kShadowBeg - 1;
-static const uptr kBad2Beg    = kShadowEnd + 1;
-static const uptr kBad2End    = kMemBeg - 1;
-static const uptr kOriginsBeg = kBad2Beg;
-static const uptr kOriginsEnd = kBad2End;
-
-bool InitShadow(bool prot1, bool prot2, bool map_shadow, bool init_origins) {
-  if ((uptr) & InitShadow < kMemBeg) {
-    Printf("FATAL: Code below application range: %p < %p. Non-PIE build?\n",
-           &InitShadow, (void *)kMemBeg);
-    return false;
+void ReportMapRange(const char *descr, uptr beg, uptr size) {
+  if (size > 0) {
+    uptr end = beg + size - 1;
+    VPrintf(1, "%s : %p - %p\n", descr, beg, end);
   }
+}
 
-  VPrintf(1, "__msan_init %p\n", &__msan_init);
-  VPrintf(1, "Memory   : %p %p\n", kMemBeg, kMemEnd);
-  VPrintf(1, "Bad2     : %p %p\n", kBad2Beg, kBad2End);
-  VPrintf(1, "Origins  : %p %p\n", kOriginsBeg, kOriginsEnd);
-  VPrintf(1, "Shadow   : %p %p\n", kShadowBeg, kShadowEnd);
-  VPrintf(1, "Bad1     : %p %p\n", kBad1Beg, kBad1End);
-
-  if (!MemoryRangeIsAvailable(kShadowBeg,
-                              init_origins ? kOriginsEnd : kShadowEnd) ||
-      (prot1 && !MemoryRangeIsAvailable(kBad1Beg, kBad1End)) ||
-      (prot2 && !MemoryRangeIsAvailable(kBad2Beg, kBad2End))) {
-    Printf("FATAL: Shadow memory range is not available.\n");
-    return false;
-  }
-
-  if (prot1 && !Mprotect(kBad1Beg, kBad1End - kBad1Beg))
-    return false;
-  if (prot2 && !Mprotect(kBad2Beg, kBad2End - kBad2Beg))
-    return false;
-  if (map_shadow) {
-    void *shadow = MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg);
-    if (shadow != (void*)kShadowBeg) return false;
-  }
-  if (init_origins) {
-    void *origins = MmapFixedNoReserve(kOriginsBeg, kOriginsEnd - kOriginsBeg);
-    if (origins != (void*)kOriginsBeg) return false;
+static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
+  if (size > 0) {
+    uptr end = beg + size - 1;
+    if (!MemoryRangeIsAvailable(beg, end)) {
+      Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
+      return false;
+    }
   }
   return true;
 }
 
+static bool ProtectMemoryRange(uptr beg, uptr size) {
+  if (size > 0) {
+    uptr end = beg + size - 1;
+    if (!Mprotect(beg, size)) {
+      Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
+      return false;
+    }
+  }
+  return true;
+}
+
+static void CheckMemoryLayoutSanity() {
+  uptr prev_end = 0;
+  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+    uptr start = kMemoryLayout[i].start;
+    uptr end = kMemoryLayout[i].end;
+    MappingDesc::Type type = kMemoryLayout[i].type;
+    CHECK_LT(start, end);
+    CHECK_EQ(prev_end, start);
+    CHECK(addr_is_type(start, type));
+    CHECK(addr_is_type((start + end) / 2, type));
+    CHECK(addr_is_type(end - 1, type));
+    if (type == MappingDesc::APP) {
+      uptr addr = start;
+      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+      addr = (start + end) / 2;
+      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+      addr = end - 1;
+      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+    }
+    prev_end = end;
+  }
+}
+
+bool InitShadow(bool map_shadow, bool init_origins) {
+  // Let user know mapping parameters first.
+  VPrintf(1, "__msan_init %p\n", &__msan_init);
+  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
+            kMemoryLayout[i].end - 1);
+
+  CheckMemoryLayoutSanity();
+
+  if (!MEM_IS_APP(&__msan_init)) {
+    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
+           (uptr)&__msan_init);
+    return false;
+  }
+
+  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+    uptr start = kMemoryLayout[i].start;
+    uptr end = kMemoryLayout[i].end;
+    uptr size = end - start;
+    MappingDesc::Type type = kMemoryLayout[i].type;
+    if ((map_shadow && type == MappingDesc::SHADOW) ||
+        (init_origins && type == MappingDesc::ORIGIN)) {
+      if (!CheckMemoryRangeAvailability(start, size)) return false;
+      if ((uptr)MmapFixedNoReserve(start, size) != start) return false;
+      if (common_flags()->use_madv_dontdump)
+        DontDumpShadowMemory(start, size);
+    } else if (type == MappingDesc::INVALID) {
+      if (!CheckMemoryRangeAvailability(start, size)) return false;
+      if (!ProtectMemoryRange(start, size)) return false;
+    }
+  }
+
+  return true;
+}
+
 void MsanDie() {
+  if (common_flags()->coverage)
+    __sanitizer_cov_dump();
   if (death_callback)
     death_callback();
-  _exit(flags()->exit_code);
+  internal__exit(flags()->exit_code);
 }
 
 static void MsanAtExit(void) {
@@ -112,20 +155,26 @@
 
 static pthread_key_t tsd_key;
 static bool tsd_key_inited = false;
+
 void MsanTSDInit(void (*destructor)(void *tsd)) {
   CHECK(!tsd_key_inited);
   tsd_key_inited = true;
   CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
 }
 
-void *MsanTSDGet() {
-  CHECK(tsd_key_inited);
-  return pthread_getspecific(tsd_key);
+static THREADLOCAL MsanThread* msan_current_thread;
+
+MsanThread *GetCurrentThread() {
+  return msan_current_thread;
 }
 
-void MsanTSDSet(void *tsd) {
+void SetCurrentThread(MsanThread *t) {
+  // Make sure we do not reset the current MsanThread.
+  CHECK_EQ(0, msan_current_thread);
+  msan_current_thread = t;
+  // Make sure that MsanTSDDtor gets called at the end.
   CHECK(tsd_key_inited);
-  pthread_setspecific(tsd_key, tsd);
+  pthread_setspecific(tsd_key, (void *)t);
 }
 
 void MsanTSDDtor(void *tsd) {
@@ -135,9 +184,12 @@
     CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
     return;
   }
+  msan_current_thread = nullptr;
+  // Make sure the signal handler cannot see a stale current thread pointer.
+  atomic_signal_fence(memory_order_seq_cst);
   MsanThread::TSDDtor(tsd);
 }
 
 }  // namespace __msan
 
-#endif  // __linux__
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX
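The layout checks above boil down to one invariant for APP regions: reaching the origin directly from an application address must land at the same place as going app -> shadow -> origin. A toy sketch of a linear mapping that satisfies it (illustrative only; the real MEM_TO_SHADOW/MEM_TO_ORIGIN macros and the kMemoryLayout table live in msan.h, and the constants below are made up):

// Illustrative only: a toy linear mapping satisfying the invariant that
// CheckMemoryLayoutSanity() asserts for APP regions. Constants are made up.
#include <cassert>
#include <cstdint>

typedef uint64_t uptr;

static const uptr kAppStart    = 0x700000000000ULL;  // hypothetical
static const uptr kShadowStart = 0x100000000000ULL;  // hypothetical
static const uptr kOriginStart = 0x200000000000ULL;  // hypothetical

static uptr ToShadow(uptr a)       { return a - kAppStart + kShadowStart; }
static uptr ToOrigin(uptr a)       { return a - kAppStart + kOriginStart; }
static uptr ShadowToOrigin(uptr s) { return s - kShadowStart + kOriginStart; }

int main() {
  for (uptr a = kAppStart; a < kAppStart + 0x1000; a += 64) {
    // The origin reached directly from the app address must equal the origin
    // reached through the shadow address.
    assert(ToOrigin(a) == ShadowToOrigin(ToShadow(a)));
  }
  return 0;
}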
diff --git a/lib/msan/msan_origin.h b/lib/msan/msan_origin.h
index a415650..36c168b 100644
--- a/lib/msan/msan_origin.h
+++ b/lib/msan/msan_origin.h
@@ -12,6 +12,9 @@
 #ifndef MSAN_ORIGIN_H
 #define MSAN_ORIGIN_H
 
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "msan_chained_origin_depot.h"
+
 namespace __msan {
 
 // Origin handling.
@@ -20,9 +23,22 @@
 // the program and describes, more or less exactly, how this memory came to be
 // uninitialized.
 //
-// Origin ids are values of ChainedOriginDepot, which is a mapping of (stack_id,
-// prev_id) -> id, where
-//  * stack_id describes an event in the program, usually a memory store.
+// There are 3 kinds of origin ids:
+// 0xxx xxxx xxxx xxxx   heap origin id
+// 1000 xxxx xxxx xxxx   stack origin id
+// 1zzz xxxx xxxx xxxx   chained origin id
+//
+// Heap origin id describes a heap memory allocation and contains (in the xxx
+// part) a value of StackDepot.
+//
+// Stack origin id describes a stack memory allocation and contains (in the xxx
+// part) an index into StackOriginDescr and StackOriginPC. We don't store a
+// stack trace for such origins for performance reasons.
+//
+// Chained origin id describes an event of storing an uninitialized value to
+// memory. The xxx part is a value of ChainedOriginDepot, which is a mapping of
+// (stack_id, prev_id) -> id, where
+//  * stack_id describes the event.
 //    StackDepot keeps a mapping between those and corresponding stack traces.
 //  * prev_id is another origin id that describes the earlier part of the
 //    uninitialized value history.
@@ -33,43 +49,119 @@
 // points in value history marked with origin ids, and edges are events that are
 // marked with stack_id.
 //
-// There are 2 special root origin ids:
-// * kHeapRoot - an origin with prev_id == kHeapRoot describes an event of
-//   allocating memory from heap.
-// * kStackRoot - an origin with prev_id == kStackRoot describes an event of
-//   allocating memory from stack (i.e. on function entry).
-// Note that ChainedOriginDepot does not store any node for kHeapRoot or
-// kStackRoot. These are just special id values.
-//
-// Three highest bits of origin id are used to store the length (or depth) of
-// the origin chain. Special depth value of 0 means unlimited.
+// The "zzz" bits of chained origin id are used to store the length (or depth)
+// of the origin chain.
 
 class Origin {
  public:
-  static const int kDepthBits = 3;
-  static const int kDepthShift = 32 - kDepthBits;
-  static const u32 kIdMask = ((u32)-1) >> (32 - kDepthShift);
-  static const u32 kDepthMask = ~kIdMask;
+  static bool isValidId(u32 id) { return id != 0 && id != (u32)-1; }
 
-  static const int kMaxDepth = (1 << kDepthBits) - 1;
-
-  static const u32 kHeapRoot = (u32)-1;
-  static const u32 kStackRoot = (u32)-2;
-
-  explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
-  Origin(u32 id, u32 depth) : raw_id_((depth << kDepthShift) | id) {
-    CHECK_EQ(this->depth(), depth);
-    CHECK_EQ(this->id(), id);
-  }
-  int depth() const { return raw_id_ >> kDepthShift; }
-  u32 id() const { return raw_id_ & kIdMask; }
   u32 raw_id() const { return raw_id_; }
-  bool isStackRoot() const { return raw_id_ == kStackRoot; }
-  bool isHeapRoot() const { return raw_id_ == kHeapRoot; }
-  bool isValid() const { return raw_id_ != 0 && raw_id_ != (u32)-1; }
+  bool isHeapOrigin() const {
+    // 0xxx xxxx xxxx xxxx
+    return raw_id_ >> kHeapShift == 0;
+  }
+  bool isStackOrigin() const {
+    // 1000 xxxx xxxx xxxx
+    return (raw_id_ >> kDepthShift) == (1 << kDepthBits);
+  }
+  bool isChainedOrigin() const {
+    // 1zzz xxxx xxxx xxxx, zzz != 000
+    return (raw_id_ >> kDepthShift) > (1 << kDepthBits);
+  }
+  u32 getChainedId() const {
+    CHECK(isChainedOrigin());
+    return raw_id_ & kChainedIdMask;
+  }
+  u32 getStackId() const {
+    CHECK(isStackOrigin());
+    return raw_id_ & kChainedIdMask;
+  }
+  u32 getHeapId() const {
+    CHECK(isHeapOrigin());
+    return raw_id_ & kHeapIdMask;
+  }
+
+  // Returns the next origin in the chain and the current stack trace.
+  Origin getNextChainedOrigin(StackTrace *stack) const {
+    CHECK(isChainedOrigin());
+    u32 prev_id;
+    u32 stack_id = ChainedOriginDepotGet(getChainedId(), &prev_id);
+    if (stack) *stack = StackDepotGet(stack_id);
+    return Origin(prev_id);
+  }
+
+  StackTrace getStackTraceForHeapOrigin() const {
+    return StackDepotGet(getHeapId());
+  }
+
+  static Origin CreateStackOrigin(u32 id) {
+    CHECK((id & kStackIdMask) == id);
+    return Origin((1 << kHeapShift) | id);
+  }
+
+  static Origin CreateHeapOrigin(StackTrace *stack) {
+    u32 stack_id = StackDepotPut(*stack);
+    CHECK(stack_id);
+    CHECK((stack_id & kHeapIdMask) == stack_id);
+    return Origin(stack_id);
+  }
+
+  static Origin CreateChainedOrigin(Origin prev, StackTrace *stack) {
+    int depth = prev.isChainedOrigin() ? prev.depth() : 0;
+    // depth is the length of the chain minus 1.
+    // origin_history_size of 0 means unlimited depth.
+    if (flags()->origin_history_size > 0) {
+      if (depth + 1 >= flags()->origin_history_size) {
+        return prev;
+      } else {
+        ++depth;
+        CHECK(depth < (1 << kDepthBits));
+      }
+    }
+
+    StackDepotHandle h = StackDepotPut_WithHandle(*stack);
+    if (!h.valid()) return prev;
+
+    if (flags()->origin_history_per_stack_limit > 0) {
+      int use_count = h.use_count();
+      if (use_count > flags()->origin_history_per_stack_limit) return prev;
+    }
+
+    u32 chained_id;
+    bool inserted = ChainedOriginDepotPut(h.id(), prev.raw_id(), &chained_id);
+    CHECK((chained_id & kChainedIdMask) == chained_id);
+
+    if (inserted && flags()->origin_history_per_stack_limit > 0)
+      h.inc_use_count_unsafe();
+
+    return Origin((1 << kHeapShift) | (depth << kDepthShift) | chained_id);
+  }
+
+  static Origin FromRawId(u32 id) {
+    return Origin(id);
+  }
 
  private:
+  static const int kDepthBits = 3;
+  static const int kDepthShift = 32 - kDepthBits - 1;
+
+  static const int kHeapShift = 31;
+  static const u32 kChainedIdMask = ((u32)-1) >> (32 - kDepthShift);
+  static const u32 kStackIdMask = ((u32)-1) >> (32 - kDepthShift);
+  static const u32 kHeapIdMask = ((u32)-1) >> (32 - kHeapShift);
+
   u32 raw_id_;
+
+  explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+
+  int depth() const {
+    CHECK(isChainedOrigin());
+    return (raw_id_ >> kDepthShift) & ((1 << kDepthBits) - 1);
+  }
+
+ public:
+  static const int kMaxDepth = (1 << kDepthBits) - 1;
 };
 
 }  // namespace __msan
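For illustration, the decoding that the Origin class performs can be written as a small standalone program; the shift constants are copied from the class above, and the example ids are arbitrary:

// Sketch only: decode a raw origin id the same way the Origin class does.
#include <cstdint>
#include <cstdio>

static void DescribeRawId(uint32_t raw_id) {
  const int kDepthBits = 3;
  const int kDepthShift = 32 - kDepthBits - 1;  // 28
  const int kHeapShift = 31;
  if ((raw_id >> kHeapShift) == 0) {
    // Heap origin: the low 31 bits are a StackDepot id.
    printf("heap origin, StackDepot id %u\n", raw_id & 0x7fffffffu);
  } else if ((raw_id >> kDepthShift) == (1u << kDepthBits)) {
    // Stack origin: the low 28 bits index StackOriginDescr/StackOriginPC.
    printf("stack origin, descr index %u\n",
           raw_id & ((1u << kDepthShift) - 1));
  } else {
    // Chained origin: 3 depth bits plus a ChainedOriginDepot id.
    printf("chained origin, depth %u, ChainedOriginDepot id %u\n",
           (raw_id >> kDepthShift) & ((1u << kDepthBits) - 1),
           raw_id & ((1u << kDepthShift) - 1));
  }
}

int main() {
  DescribeRawId(0x00001234);  // heap
  DescribeRawId(0x80001234);  // stack
  DescribeRawId(0xa0001234);  // chained, depth 2
  return 0;
}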
diff --git a/lib/msan/msan_poisoning.cc b/lib/msan/msan_poisoning.cc
new file mode 100644
index 0000000..96411fd
--- /dev/null
+++ b/lib/msan/msan_poisoning.cc
@@ -0,0 +1,174 @@
+//===-- msan_poisoning.cc ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "msan_poisoning.h"
+
+#include "interception/interception.h"
+#include "msan_origin.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memmove, void *dest, const void *src, uptr n)
+
+namespace __msan {
+
+u32 GetOriginIfPoisoned(uptr addr, uptr size) {
+  unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
+  for (uptr i = 0; i < size; ++i)
+    if (s[i]) return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
+  return 0;
+}
+
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
+                         u32 src_origin) {
+  uptr dst_s = MEM_TO_SHADOW(addr);
+  uptr src_s = src_shadow;
+  uptr src_s_end = src_s + size;
+
+  for (; src_s < src_s_end; ++dst_s, ++src_s)
+    if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s & ~3UL) = src_origin;
+}
+
+void CopyOrigin(const void *dst, const void *src, uptr size,
+                StackTrace *stack) {
+  if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
+
+  uptr d = (uptr)dst;
+  uptr beg = d & ~3UL;
+  // Copy left unaligned origin if that memory is poisoned.
+  if (beg < d) {
+    u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
+    if (o) {
+      if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+      *(u32 *)MEM_TO_ORIGIN(beg) = o;
+    }
+    beg += 4;
+  }
+
+  uptr end = (d + size) & ~3UL;
+  // If both ends fall into the same 4-byte slot, we are done.
+  if (end < beg) return;
+
+  // Copy right unaligned origin if that memory is poisoned.
+  if (end < d + size) {
+    u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
+    if (o) {
+      if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+      *(u32 *)MEM_TO_ORIGIN(end) = o;
+    }
+  }
+
+  if (beg < end) {
+    // Align src up.
+    uptr s = ((uptr)src + 3) & ~3UL;
+    // FIXME: factor out to msan_copy_origin_aligned
+    if (__msan_get_track_origins() > 1) {
+      u32 *src = (u32 *)MEM_TO_ORIGIN(s);
+      u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
+      u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
+      u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
+      u32 src_o = 0;
+      u32 dst_o = 0;
+      for (; src < src_end; ++src, ++src_s, ++dst) {
+        if (!*src_s) continue;
+        if (*src != src_o) {
+          src_o = *src;
+          dst_o = ChainOrigin(src_o, stack);
+        }
+        *dst = dst_o;
+      }
+    } else {
+      REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
+                   end - beg);
+    }
+  }
+}
+
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+                         StackTrace *stack) {
+  if (!MEM_IS_APP(dst)) return;
+  if (!MEM_IS_APP(src)) return;
+  if (src == dst) return;
+  REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
+                (void *)MEM_TO_SHADOW((uptr)src), size);
+  if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+                         StackTrace *stack) {
+  if (!MEM_IS_APP(dst)) return;
+  if (!MEM_IS_APP(src)) return;
+  REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
+               (void *)MEM_TO_SHADOW((uptr)src), size);
+  if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
+  REAL(memcpy)(dst, src, size);
+  CopyShadowAndOrigin(dst, src, size, stack);
+}
+
+void SetShadow(const void *ptr, uptr size, u8 value) {
+  uptr PageSize = GetPageSizeCached();
+  uptr shadow_beg = MEM_TO_SHADOW(ptr);
+  uptr shadow_end = MEM_TO_SHADOW((uptr)ptr + size);
+  if (value ||
+      shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+    REAL(memset)((void *)shadow_beg, value, shadow_end - shadow_beg);
+  } else {
+    uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+    uptr page_end = RoundDownTo(shadow_end, PageSize);
+
+    if (page_beg >= page_end) {
+      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+    } else {
+      if (page_beg != shadow_beg) {
+        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+      }
+      if (page_end != shadow_end) {
+        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+      }
+      MmapFixedNoReserve(page_beg, page_end - page_beg);
+    }
+  }
+}
+
+void SetOrigin(const void *dst, uptr size, u32 origin) {
+  // Origin mapping is 4 bytes per 4 bytes of application memory.
+  // Here we extend the range such that its left and right bounds are both
+  // 4 byte aligned.
+  uptr x = MEM_TO_ORIGIN((uptr)dst);
+  uptr beg = x & ~3UL;               // align down.
+  uptr end = (x + size + 3) & ~3UL;  // align up.
+  u64 origin64 = ((u64)origin << 32) | origin;
+  // This is like memset, but the value is 32-bit. We unroll by 2 to write
+  // 64 bits at once. May want to unroll further to get 128-bit stores.
+  if (beg & 7ULL) {
+    *(u32 *)beg = origin;
+    beg += 4;
+  }
+  for (uptr addr = beg; addr < (end & ~7UL); addr += 8) *(u64 *)addr = origin64;
+  if (end & 7ULL) *(u32 *)(end - 4) = origin;
+}
+
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
+  SetShadow(dst, size, (u8)-1);
+
+  if (__msan_get_track_origins()) {
+    Origin o = Origin::CreateHeapOrigin(stack);
+    SetOrigin(dst, size, o.raw_id());
+  }
+}
+
+}  // namespace __msan
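The only subtle step in SetOrigin() is the rounding: the target range is widened outward to 4-byte bounds, so a partially covered first or last word still receives a full 32-bit origin. A minimal standalone sketch of the same arithmetic applied to an ordinary buffer (Fill32 is made up for this example):

// Sketch only: the align-down/align-up arithmetic used by SetOrigin(),
// applied to a plain local buffer instead of the origin mapping.
#include <cassert>
#include <cstdint>
#include <cstring>

// Fill every 4-byte slot that overlaps [ptr, ptr + size) with a 32-bit value.
static void Fill32(void *ptr, uintptr_t size, uint32_t v) {
  uintptr_t x = (uintptr_t)ptr;
  uintptr_t beg = x & ~(uintptr_t)3;               // align down
  uintptr_t end = (x + size + 3) & ~(uintptr_t)3;  // align up
  for (uintptr_t p = beg; p < end; p += 4) memcpy((void *)p, &v, 4);
}

int main() {
  alignas(8) uint8_t buf[32] = {};
  // Covering bytes [6, 11) of buf touches the aligned slots [4, 12).
  Fill32(buf + 6, 5, 0xdeadbeefu);
  assert(buf[3] == 0 && buf[4] != 0 && buf[11] != 0 && buf[12] == 0);
  return 0;
}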
diff --git a/lib/msan/msan_poisoning.h b/lib/msan/msan_poisoning.h
new file mode 100644
index 0000000..edacbee
--- /dev/null
+++ b/lib/msan/msan_poisoning.h
@@ -0,0 +1,59 @@
+//===-- msan_poisoning.h ----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_POISONING_H
+#define MSAN_POISONING_H
+
+#include "msan.h"
+
+namespace __msan {
+
+// Return origin for the first poisoned byte in the memory range, or 0.
+u32 GetOriginIfPoisoned(uptr addr, uptr size);
+
+// Walk the [addr, addr+size) app memory region; wherever the corresponding
+// shadow byte in [src_shadow, src_shadow+size) is non-zero, set the origin of
+// that app byte to src_origin.
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
+
+// Copy origin from src (app address) to dst (app address), creating chained
+// origin ids as necessary, without overriding origin for fully initialized
+// quads.
+void CopyOrigin(const void *dst, const void *src, uptr size, StackTrace *stack);
+
+// memmove() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+                         StackTrace *stack);
+
+// memcpy() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+                         StackTrace *stack);
+
+// memcpy() app memory, and do "the right thing" to the corresponding shadow and
+// origin regions.
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack);
+
+// Fill shadow with value. Ptr is an application address.
+void SetShadow(const void *ptr, uptr size, u8 value);
+
+// Set origin for the memory region.
+void SetOrigin(const void *dst, uptr size, u32 origin);
+
+// Mark memory region uninitialized, with origins.
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack);
+
+}  // namespace __msan
+
+#endif  // MSAN_POISONING_H
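The helpers above are meant to be called right after the corresponding libc call inside an interceptor. As a hypothetical sketch (not part of this patch; bcopy is only an example target, and INTERCEPTOR/REAL/ENSURE_MSAN_INITED/GET_STORE_STACK_TRACE are the usual helpers from msan_interceptors.cc):

// Hypothetical sketch (not in this patch): a memmove-style interceptor that
// keeps shadow and origins in sync via the API declared above.
INTERCEPTOR(void, bcopy, const void *src, void *dest, SIZE_T n) {
  ENSURE_MSAN_INITED();
  GET_STORE_STACK_TRACE;                      // provides `stack`
  REAL(bcopy)(src, dest, n);
  MoveShadowAndOrigin(dest, src, n, &stack);  // overlap-safe metadata copy
}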
diff --git a/lib/msan/msan_report.cc b/lib/msan/msan_report.cc
index f4978c7..33c28b2 100644
--- a/lib/msan/msan_report.cc
+++ b/lib/msan/msan_report.cc
@@ -61,34 +61,38 @@
 static void DescribeOrigin(u32 id) {
   VPrintf(1, "  raw origin id: %d\n", id);
   Decorator d;
-  while (true) {
-    Origin o(id);
-    if (!o.isValid()) {
-      Printf("  %sinvalid origin id(%d)%s\n", d.Warning(), id, d.End());
-      break;
+  Origin o = Origin::FromRawId(id);
+  while (o.isChainedOrigin()) {
+    StackTrace stack;
+    o = o.getNextChainedOrigin(&stack);
+    Printf("  %sUninitialized value was stored to memory at%s\n", d.Origin(),
+        d.End());
+    stack.Print();
+  }
+  if (o.isStackOrigin()) {
+    uptr pc;
+    const char *so = GetStackOriginDescr(o.getStackId(), &pc);
+    DescribeStackOrigin(so, pc);
+  } else {
+    StackTrace stack = o.getStackTraceForHeapOrigin();
+    switch (stack.tag) {
+      case StackTrace::TAG_ALLOC:
+        Printf("  %sUninitialized value was created by a heap allocation%s\n",
+               d.Origin(), d.End());
+        break;
+      case StackTrace::TAG_DEALLOC:
+        Printf("  %sUninitialized value was created by a heap deallocation%s\n",
+               d.Origin(), d.End());
+        break;
+      case STACK_TRACE_TAG_POISON:
+        Printf("  %sMemory was marked as uninitialized%s\n", d.Origin(),
+               d.End());
+        break;
+      default:
+        Printf("  %sUninitialized value was created%s\n", d.Origin(), d.End());
+        break;
     }
-    u32 prev_id;
-    u32 stack_id = ChainedOriginDepotGet(o.id(), &prev_id);
-    Origin prev_o(prev_id);
-
-    if (prev_o.isStackRoot()) {
-      uptr pc;
-      const char *so = GetStackOriginDescr(stack_id, &pc);
-      DescribeStackOrigin(so, pc);
-      break;
-    } else if (prev_o.isHeapRoot()) {
-      Printf("  %sUninitialized value was created by a heap allocation%s\n",
-             d.Origin(), d.End());
-      StackDepotGet(stack_id).Print();
-      break;
-    } else {
-      // chained origin
-      // FIXME: copied? modified? passed through? observed?
-      Printf("  %sUninitialized value was stored to memory at%s\n", d.Origin(),
-             d.End());
-      StackDepotGet(stack_id).Print();
-      id = prev_id;
-    }
+    stack.Print();
   }
 }
 
@@ -266,7 +270,7 @@
   Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
          d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
          d.End());
-  if (__sanitizer::common_flags()->verbosity > 0)
+  if (__sanitizer::Verbosity())
     DescribeMemoryRange(start, size);
 }
 
diff --git a/lib/msan/msan_thread.cc b/lib/msan/msan_thread.cc
index 2a1e05a..e15a247 100644
--- a/lib/msan/msan_thread.cc
+++ b/lib/msan/msan_thread.cc
@@ -36,6 +36,7 @@
   if (tls_begin_ != tls_end_)
     __msan_unpoison((void *)tls_begin_, tls_end_ - tls_begin_);
   DTLS *dtls = DTLS_Get();
+  CHECK_NE(dtls, 0);
   for (uptr i = 0; i < dtls->dtv_size; ++i)
     __msan_unpoison((void *)(dtls->dtv[i].beg), dtls->dtv[i].size);
 }
@@ -78,15 +79,4 @@
   return res;
 }
 
-MsanThread *GetCurrentThread() {
-  return reinterpret_cast<MsanThread *>(MsanTSDGet());
-}
-
-void SetCurrentThread(MsanThread *t) {
-  // Make sure we do not reset the current MsanThread.
-  CHECK_EQ(0, MsanTSDGet());
-  MsanTSDSet(t);
-  CHECK_EQ(t, MsanTSDGet());
-}
-
 } // namespace __msan
diff --git a/lib/msan/tests/CMakeLists.txt b/lib/msan/tests/CMakeLists.txt
index f3c11ba..e008bd3 100644
--- a/lib/msan/tests/CMakeLists.txt
+++ b/lib/msan/tests/CMakeLists.txt
@@ -15,12 +15,11 @@
 set(MSAN_LOADABLE_SOURCE msan_loadable.cc)
 set(MSAN_UNITTEST_HEADERS
   msan_test_config.h
-  msandr_test_so.h
   ../../../include/sanitizer/msan_interface.h
 )
-set(MSANDR_UNITTEST_SOURCE msandr_test_so.cc)
 set(MSAN_UNITTEST_COMMON_CFLAGS
   -I${COMPILER_RT_LIBCXX_PATH}/include
+  ${COMPILER_RT_TEST_CFLAGS}
   ${COMPILER_RT_GTEST_CFLAGS}
   -I${COMPILER_RT_SOURCE_DIR}/include
   -I${COMPILER_RT_SOURCE_DIR}/lib
@@ -113,25 +112,13 @@
   msan_compile(MSAN_INST_LOADABLE_OBJECTS ${MSAN_LOADABLE_SOURCE} ${arch} "${kind}"
                ${MSAN_UNITTEST_INSTRUMENTED_CFLAGS} ${ARGN})
 
-  # Uninstrumented shared object for MSanDR tests.
-  set(MSANDR_TEST_OBJECTS)
-  msan_compile(MSANDR_TEST_OBJECTS ${MSANDR_UNITTEST_SOURCE} ${arch} "${kind}"
-               ${MSAN_UNITTEST_COMMON_CFLAGS})
-
   # Instrumented loadable library tests.
   set(MSAN_LOADABLE_SO)
   msan_link_shared(MSAN_LOADABLE_SO "libmsan_loadable" ${arch} "${kind}"
                    OBJECTS ${MSAN_INST_LOADABLE_OBJECTS}
                    DEPS ${MSAN_INST_LOADABLE_OBJECTS})
 
-  # Uninstrumented shared library tests.
-  set(MSANDR_TEST_SO)
-  msan_link_shared(MSANDR_TEST_SO "libmsandr_test" ${arch} "${kind}"
-                   OBJECTS ${MSANDR_TEST_OBJECTS}
-                   DEPS ${MSANDR_TEST_OBJECTS})
-
-  set(MSAN_TEST_OBJECTS ${MSAN_INST_TEST_OBJECTS} ${MSAN_INST_GTEST}
-                        ${MSANDR_TEST_SO})
+  set(MSAN_TEST_OBJECTS ${MSAN_INST_TEST_OBJECTS} ${MSAN_INST_GTEST})
   set(MSAN_TEST_DEPS ${MSAN_TEST_OBJECTS} libcxx_msan${kind}
                      ${MSAN_LOADABLE_SO})
   if(NOT COMPILER_RT_STANDALONE_BUILD)
@@ -149,9 +136,9 @@
 
 # We should only build MSan unit tests if we can build instrumented libcxx.
 if(COMPILER_RT_CAN_EXECUTE_TESTS AND COMPILER_RT_HAS_LIBCXX_SOURCES)
-  if(CAN_TARGET_x86_64)
-    add_msan_tests_for_arch(x86_64 "")
-    add_msan_tests_for_arch(x86_64 "-with-call"
+  foreach(arch ${MSAN_SUPPORTED_ARCH})
+    add_msan_tests_for_arch(${arch} "")
+    add_msan_tests_for_arch(${arch} "-with-call"
                             -mllvm -msan-instrumentation-with-call-threshold=0)
-  endif()
+  endforeach()
 endif()
diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc
index 12012a0..00dd20a 100644
--- a/lib/msan/tests/msan_test.cc
+++ b/lib/msan/tests/msan_test.cc
@@ -20,7 +20,19 @@
 
 #include "sanitizer/allocator_interface.h"
 #include "sanitizer/msan_interface.h"
-#include "msandr_test_so.h"
+
+#if defined(__FreeBSD__)
+# define _KERNEL  // To declare 'shminfo' structure.
+# include <sys/shm.h>
+# undef _KERNEL
+extern "C" {
+// <sys/shm.h> doesn't declare these functions in _KERNEL mode.
+void *shmat(int, const void *, int);
+int shmget(key_t, size_t, int);
+int shmctl(int, int, struct shmid_ds *);
+int shmdt(const void *);
+}
+#endif
 
 #include <inttypes.h>
 #include <stdlib.h>
@@ -28,7 +40,6 @@
 #include <stdio.h>
 #include <wchar.h>
 #include <math.h>
-#include <malloc.h>
 
 #include <arpa/inet.h>
 #include <dlfcn.h>
@@ -44,20 +55,40 @@
 #include <sys/resource.h>
 #include <sys/ioctl.h>
 #include <sys/statvfs.h>
-#include <sys/sysinfo.h>
 #include <sys/utsname.h>
 #include <sys/mman.h>
-#include <sys/vfs.h>
 #include <dirent.h>
 #include <pwd.h>
 #include <sys/socket.h>
 #include <netdb.h>
 #include <wordexp.h>
-#include <mntent.h>
-#include <netinet/ether.h>
 #include <sys/ipc.h>
 #include <sys/shm.h>
 
+#if !defined(__FreeBSD__)
+# include <malloc.h>
+# include <sys/sysinfo.h>
+# include <sys/vfs.h>
+# include <mntent.h>
+# include <netinet/ether.h>
+#else
+# include <signal.h>
+# include <netinet/in.h>
+# include <pthread_np.h>
+# include <sys/uio.h>
+# include <sys/mount.h>
+# include <sys/sysctl.h>
+# include <net/ethernet.h>
+# define f_namelen f_namemax  // FreeBSD's statfs names this field f_namemax.
+# define cpu_set_t cpuset_t
+extern "C" {
+// FreeBSD's <ssp/string.h> defines mempcpy() to be a macro expanding into
+// a __builtin___mempcpy_chk() call, but since MSan RTL defines it as an
+// ordinary function, we can declare it here to complete the tests.
+void *mempcpy(void *dest, const void *src, size_t n);
+}
+#endif
+
 #if defined(__i386__) || defined(__x86_64__)
 # include <emmintrin.h>
 # define MSAN_HAS_M128 1
@@ -69,7 +100,23 @@
 # include <immintrin.h>
 #endif
 
-static const size_t kPageSize = 4096;
+// On FreeBSD procfs is not enabled by default.
+#if defined(__FreeBSD__)
+# define FILE_TO_READ "/bin/cat"
+# define DIR_TO_READ "/bin"
+# define SUBFILE_TO_READ "cat"
+# define SYMLINK_TO_READ "/usr/bin/tar"
+# define SUPERUSER_GROUP "wheel"
+#else
+# define FILE_TO_READ "/proc/self/stat"
+# define DIR_TO_READ "/proc/self"
+# define SUBFILE_TO_READ "stat"
+# define SYMLINK_TO_READ "/proc/self/exe"
+# define SUPERUSER_GROUP "root"
+#endif
+
+const size_t kPageSize = 4096;
+const size_t kMaxPathLength = 4096;
 
 typedef unsigned char      U1;
 typedef unsigned short     U2;  // NOLINT
@@ -87,9 +134,12 @@
   __msan_set_origin(&x, sizeof(x), 0x1234);
   U4 origin = __msan_get_origin(&x);
   __msan_set_origin(&x, sizeof(x), 0);
-  return origin == 0x1234;
+  return __msan_origin_is_descendant_or_same(origin, 0x1234);
 }
 
+#define EXPECT_ORIGIN(expected, origin) \
+  EXPECT_TRUE(__msan_origin_is_descendant_or_same((origin), (expected)))
+
 #define EXPECT_UMR(action) \
     do {                        \
       __msan_set_expect_umr(1); \
@@ -97,14 +147,13 @@
       __msan_set_expect_umr(0); \
     } while (0)
 
-#define EXPECT_UMR_O(action, origin) \
-    do {                                            \
-      __msan_set_expect_umr(1);                     \
-      action;                                       \
-      __msan_set_expect_umr(0);                     \
-      if (TrackingOrigins())                        \
-        EXPECT_EQ(origin, __msan_get_umr_origin()); \
-    } while (0)
+#define EXPECT_UMR_O(action, origin)                                       \
+  do {                                                                     \
+    __msan_set_expect_umr(1);                                              \
+    action;                                                                \
+    __msan_set_expect_umr(0);                                              \
+    if (TrackingOrigins()) EXPECT_ORIGIN(origin, __msan_get_umr_origin()); \
+  } while (0)
 
 #define EXPECT_POISONED(x) ExpectPoisoned(x)
 
@@ -119,15 +168,14 @@
 template<typename T>
 void ExpectPoisonedWithOrigin(const T& t, unsigned origin) {
   EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t)));
-  if (TrackingOrigins())
-    EXPECT_EQ(origin, __msan_get_origin((void*)&t));
+  if (TrackingOrigins()) EXPECT_ORIGIN(origin, __msan_get_origin((void *)&t));
 }
 
-#define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x)
+#define EXPECT_NOT_POISONED(x) EXPECT_EQ(true, TestForNotPoisoned((x)))
 
 template<typename T>
-void ExpectNotPoisoned(const T& t) {
-  EXPECT_EQ(-1, __msan_test_shadow((void*)&t, sizeof(t)));
+bool TestForNotPoisoned(const T& t) {
+  return __msan_test_shadow((void*)&t, sizeof(t)) == -1;
 }
 
 static U8 poisoned_array[100];
@@ -494,10 +542,9 @@
 
 TEST(MemorySanitizer, DynRet) {
   ReturnPoisoned<S8>();
-  EXPECT_NOT_POISONED(clearenv());
+  EXPECT_NOT_POISONED(atoi("0"));
 }
 
-
 TEST(MemorySanitizer, DynRet1) {
   ReturnPoisoned<S8>();
 }
@@ -552,7 +599,7 @@
 TEST(MemorySanitizer, strerror_r) {
   errno = 0;
   char buf[1000];
-  char *res = strerror_r(EINVAL, buf, sizeof(buf));
+  char *res = (char*) (size_t) strerror_r(EINVAL, buf, sizeof(buf));
   ASSERT_EQ(0, errno);
   if (!res) res = buf; // POSIX version success.
   EXPECT_NOT_POISONED(strlen(res));
@@ -560,7 +607,7 @@
 
 TEST(MemorySanitizer, fread) {
   char *x = new char[32];
-  FILE *f = fopen("/proc/self/stat", "r");
+  FILE *f = fopen(FILE_TO_READ, "r");
   ASSERT_TRUE(f != NULL);
   fread(x, 1, 32, f);
   EXPECT_NOT_POISONED(x[0]);
@@ -572,7 +619,7 @@
 
 TEST(MemorySanitizer, read) {
   char *x = new char[32];
-  int fd = open("/proc/self/stat", O_RDONLY);
+  int fd = open(FILE_TO_READ, O_RDONLY);
   ASSERT_GT(fd, 0);
   int sz = read(fd, x, 32);
   ASSERT_EQ(sz, 32);
@@ -585,7 +632,7 @@
 
 TEST(MemorySanitizer, pread) {
   char *x = new char[32];
-  int fd = open("/proc/self/stat", O_RDONLY);
+  int fd = open(FILE_TO_READ, O_RDONLY);
   ASSERT_GT(fd, 0);
   int sz = pread(fd, x, 32, 0);
   ASSERT_EQ(sz, 32);
@@ -603,11 +650,11 @@
   iov[0].iov_len = 5;
   iov[1].iov_base = buf + 10;
   iov[1].iov_len = 2000;
-  int fd = open("/proc/self/stat", O_RDONLY);
+  int fd = open(FILE_TO_READ, O_RDONLY);
   ASSERT_GT(fd, 0);
   int sz = readv(fd, iov, 2);
   ASSERT_GE(sz, 0);
-  ASSERT_LT(sz, 5 + 2000);
+  ASSERT_LE(sz, 5 + 2000);
   ASSERT_GT((size_t)sz, iov[0].iov_len);
   EXPECT_POISONED(buf[0]);
   EXPECT_NOT_POISONED(buf[1]);
@@ -627,11 +674,11 @@
   iov[0].iov_len = 5;
   iov[1].iov_base = buf + 10;
   iov[1].iov_len = 2000;
-  int fd = open("/proc/self/stat", O_RDONLY);
+  int fd = open(FILE_TO_READ, O_RDONLY);
   ASSERT_GT(fd, 0);
   int sz = preadv(fd, iov, 2, 3);
   ASSERT_GE(sz, 0);
-  ASSERT_LT(sz, 5 + 2000);
+  ASSERT_LE(sz, 5 + 2000);
   ASSERT_GT((size_t)sz, iov[0].iov_len);
   EXPECT_POISONED(buf[0]);
   EXPECT_NOT_POISONED(buf[1]);
@@ -653,15 +700,14 @@
 
 TEST(MemorySanitizer, readlink) {
   char *x = new char[1000];
-  readlink("/proc/self/exe", x, 1000);
+  readlink(SYMLINK_TO_READ, x, 1000);
   EXPECT_NOT_POISONED(x[0]);
   delete [] x;
 }
 
-
 TEST(MemorySanitizer, stat) {
   struct stat* st = new struct stat;
-  int res = stat("/proc/self/stat", st);
+  int res = stat(FILE_TO_READ, st);
   ASSERT_EQ(0, res);
   EXPECT_NOT_POISONED(st->st_dev);
   EXPECT_NOT_POISONED(st->st_mode);
@@ -670,9 +716,9 @@
 
 TEST(MemorySanitizer, fstatat) {
   struct stat* st = new struct stat;
-  int dirfd = open("/proc/self", O_RDONLY);
+  int dirfd = open(DIR_TO_READ, O_RDONLY);
   ASSERT_GT(dirfd, 0);
-  int res = fstatat(dirfd, "stat", st, 0);
+  int res = fstatat(dirfd, SUBFILE_TO_READ, st, 0);
   ASSERT_EQ(0, res);
   EXPECT_NOT_POISONED(st->st_dev);
   EXPECT_NOT_POISONED(st->st_mode);
@@ -764,6 +810,8 @@
   close(pipefd[1]);
 }
 
+// There is no ppoll() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, ppoll) {
   int* pipefd = new int[2];
   int res = pipe(pipefd);
@@ -788,6 +836,7 @@
   close(pipefd[0]);
   close(pipefd[1]);
 }
+#endif
 
 TEST(MemorySanitizer, poll_positive) {
   int* pipefd = new int[2];
@@ -852,8 +901,11 @@
   res = fcntl(connect_socket, F_SETFL, O_NONBLOCK);
   ASSERT_EQ(0, res);
   res = connect(connect_socket, (struct sockaddr *)&sai, sizeof(sai));
-  ASSERT_EQ(-1, res);
-  ASSERT_EQ(EINPROGRESS, errno);
+  // On FreeBSD this connection completes immediately.
+  if (res != 0) {
+    ASSERT_EQ(-1, res);
+    ASSERT_EQ(EINPROGRESS, errno);
+  }
 
   __msan_poison(&sai, sizeof(sai));
   int new_sock = accept(listen_socket, (struct sockaddr *)&sai, &sz);
@@ -974,7 +1026,6 @@
   ASSERT_EQ(0, res);
   ASSERT_EQ(sizeof(client_sai), sz);
 
-  
   const char *s = "message text";
   struct iovec iov;
   iov.iov_base = (void *)s;
@@ -1126,12 +1177,15 @@
   free(res);
 }
 
+// There's no get_current_dir_name() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, get_current_dir_name) {
   char* res = get_current_dir_name();
   ASSERT_TRUE(res != NULL);
   EXPECT_NOT_POISONED(res[0]);
   free(res);
 }
+#endif
 
 TEST(MemorySanitizer, shmctl) {
   int id = shmget(IPC_PRIVATE, 4096, 0644 | IPC_CREAT);
@@ -1142,6 +1196,8 @@
   ASSERT_GT(res, -1);
   EXPECT_NOT_POISONED(ds);
 
+  // FreeBSD does not support shmctl(IPC_INFO) and shmctl(SHM_INFO).
+#if !defined(__FreeBSD__)
   struct shminfo si;
   res = shmctl(id, IPC_INFO, (struct shmid_ds *)&si);
   ASSERT_GT(res, -1);
@@ -1151,6 +1207,7 @@
   res = shmctl(id, SHM_INFO, (struct shmid_ds *)&s_i);
   ASSERT_GT(res, -1);
   EXPECT_NOT_POISONED(s_i);
+#endif
 
   res = shmctl(id, IPC_RMID, 0);
   ASSERT_GT(res, -1);
@@ -1158,7 +1215,7 @@
 
 TEST(MemorySanitizer, shmat) {
   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
-                 MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, p);
 
   ((char *)p)[10] = *GetPoisoned<U1>();
@@ -1184,6 +1241,8 @@
   ASSERT_GT(res, -1);
 }
 
+// There's no random_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, random_r) {
   int32_t x;
   char z[64];
@@ -1199,6 +1258,7 @@
   ASSERT_EQ(0, res);
   EXPECT_NOT_POISONED(x);
 }
+#endif
 
 TEST(MemorySanitizer, confstr) {
   char buf[3];
@@ -1216,6 +1276,16 @@
   ASSERT_EQ(res, strlen(buf2) + 1);
 }
 
+TEST(MemorySanitizer, opendir) {
+  DIR *dir = opendir(".");
+  closedir(dir);
+
+  char name[10] = ".";
+  __msan_poison(name, sizeof(name));
+  EXPECT_UMR(dir = opendir(name));
+  closedir(dir);
+}
+
 TEST(MemorySanitizer, readdir) {
   DIR *dir = opendir(".");
   struct dirent *d = readdir(dir);
@@ -1252,6 +1322,8 @@
   free(res);
 }
 
+// There's no canonicalize_file_name() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, canonicalize_file_name) {
   const char* relpath = ".";
   char* res = canonicalize_file_name(relpath);
@@ -1259,6 +1331,7 @@
   EXPECT_NOT_POISONED(res[0]);
   free(res);
 }
+#endif
 
 extern char **environ;
 
@@ -1539,55 +1612,74 @@
   EXPECT_POISONED(a[7]);
 }
 
-#define TEST_STRTO_INT(func_name)          \
-  TEST(MemorySanitizer, func_name) {       \
-    char *e;                               \
-    EXPECT_EQ(1U, func_name("1", &e, 10)); \
-    EXPECT_NOT_POISONED((S8)e);            \
+#define TEST_STRTO_INT(func_name, char_type, str_prefix) \
+  TEST(MemorySanitizer, func_name) {                     \
+    char_type *e;                                        \
+    EXPECT_EQ(1U, func_name(str_prefix##"1", &e, 10));   \
+    EXPECT_NOT_POISONED((S8)e);                          \
   }
 
-#define TEST_STRTO_FLOAT(func_name)     \
-  TEST(MemorySanitizer, func_name) {    \
-    char *e;                            \
-    EXPECT_NE(0, func_name("1.5", &e)); \
-    EXPECT_NOT_POISONED((S8)e);         \
+#define TEST_STRTO_FLOAT(func_name, char_type, str_prefix) \
+  TEST(MemorySanitizer, func_name) {                       \
+    char_type *e;                                          \
+    EXPECT_NE(0, func_name(str_prefix##"1.5", &e));        \
+    EXPECT_NOT_POISONED((S8)e);                            \
   }
 
-#define TEST_STRTO_FLOAT_LOC(func_name)                          \
+#define TEST_STRTO_FLOAT_LOC(func_name, char_type, str_prefix)   \
   TEST(MemorySanitizer, func_name) {                             \
     locale_t loc = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); \
-    char *e;                                                     \
-    EXPECT_NE(0, func_name("1.5", &e, loc));                     \
+    char_type *e;                                                \
+    EXPECT_NE(0, func_name(str_prefix##"1.5", &e, loc));         \
     EXPECT_NOT_POISONED((S8)e);                                  \
     freelocale(loc);                                             \
   }
 
-#define TEST_STRTO_INT_LOC(func_name)                            \
+#define TEST_STRTO_INT_LOC(func_name, char_type, str_prefix)     \
   TEST(MemorySanitizer, func_name) {                             \
     locale_t loc = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); \
-    char *e;                                                     \
-    ASSERT_EQ(1U, func_name("1", &e, 10, loc));                  \
+    char_type *e;                                                \
+    ASSERT_EQ(1U, func_name(str_prefix##"1", &e, 10, loc));      \
     EXPECT_NOT_POISONED((S8)e);                                  \
     freelocale(loc);                                             \
   }
 
-TEST_STRTO_INT(strtol)
-TEST_STRTO_INT(strtoll)
-TEST_STRTO_INT(strtoul)
-TEST_STRTO_INT(strtoull)
+TEST_STRTO_INT(strtol, char, )
+TEST_STRTO_INT(strtoll, char, )
+TEST_STRTO_INT(strtoul, char, )
+TEST_STRTO_INT(strtoull, char, )
 
-TEST_STRTO_FLOAT(strtof)
-TEST_STRTO_FLOAT(strtod)
-TEST_STRTO_FLOAT(strtold)
+TEST_STRTO_FLOAT(strtof, char, )
+TEST_STRTO_FLOAT(strtod, char, )
+TEST_STRTO_FLOAT(strtold, char, )
 
-TEST_STRTO_FLOAT_LOC(strtof_l)
-TEST_STRTO_FLOAT_LOC(strtod_l)
-TEST_STRTO_FLOAT_LOC(strtold_l)
+TEST_STRTO_FLOAT_LOC(strtof_l, char, )
+TEST_STRTO_FLOAT_LOC(strtod_l, char, )
+TEST_STRTO_FLOAT_LOC(strtold_l, char, )
 
-TEST_STRTO_INT_LOC(strtol_l)
-TEST_STRTO_INT_LOC(strtoll_l)
-TEST_STRTO_INT_LOC(strtoul_l)
-TEST_STRTO_INT_LOC(strtoull_l)
+TEST_STRTO_INT_LOC(strtol_l, char, )
+TEST_STRTO_INT_LOC(strtoll_l, char, )
+TEST_STRTO_INT_LOC(strtoul_l, char, )
+TEST_STRTO_INT_LOC(strtoull_l, char, )
+
+TEST_STRTO_INT(wcstol, wchar_t, L)
+TEST_STRTO_INT(wcstoll, wchar_t, L)
+TEST_STRTO_INT(wcstoul, wchar_t, L)
+TEST_STRTO_INT(wcstoull, wchar_t, L)
+
+TEST_STRTO_FLOAT(wcstof, wchar_t, L)
+TEST_STRTO_FLOAT(wcstod, wchar_t, L)
+TEST_STRTO_FLOAT(wcstold, wchar_t, L)
+
+TEST_STRTO_FLOAT_LOC(wcstof_l, wchar_t, L)
+TEST_STRTO_FLOAT_LOC(wcstod_l, wchar_t, L)
+TEST_STRTO_FLOAT_LOC(wcstold_l, wchar_t, L)
+
+TEST_STRTO_INT_LOC(wcstol_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoll_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoul_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoull_l, wchar_t, L)
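The str_prefix parameter above works by token-pasting onto the string literal, so one macro body covers both the narrow and the wide variants. A minimal standalone illustration (PARSE_ONE_DECL is made up for this example):

// Minimal illustration of the str_prefix## trick: pasting an empty prefix
// leaves a narrow literal, pasting L forms a wide one.
#include <cstdlib>
#include <cwchar>

#define PARSE_ONE_DECL(name, func, char_type, prefix) \
  static long name() {                                \
    char_type *e;                                     \
    return (long)func(prefix##"1", &e, 10);           \
  }

PARSE_ONE_DECL(ParseNarrow, strtol, char, )    // calls strtol("1", &e, 10)
PARSE_ONE_DECL(ParseWide, wcstol, wchar_t, L)  // calls wcstol(L"1", &e, 10)

int main() {
  return (ParseNarrow() == 1 && ParseWide() == 1) ? 0 : 1;
}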
+
 
 TEST(MemorySanitizer, strtoimax) {
   char *e;
@@ -1603,12 +1695,20 @@
 
 #ifdef __GLIBC__
 extern "C" float __strtof_l(const char *nptr, char **endptr, locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtof_l)
+TEST_STRTO_FLOAT_LOC(__strtof_l, char, )
 extern "C" double __strtod_l(const char *nptr, char **endptr, locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtod_l)
+TEST_STRTO_FLOAT_LOC(__strtod_l, char, )
 extern "C" long double __strtold_l(const char *nptr, char **endptr,
                                    locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtold_l)
+TEST_STRTO_FLOAT_LOC(__strtold_l, char, )
+
+extern "C" float __wcstof_l(const wchar_t *nptr, wchar_t **endptr, locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstof_l, wchar_t, L)
+extern "C" double __wcstod_l(const wchar_t *nptr, wchar_t **endptr, locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstod_l, wchar_t, L)
+extern "C" long double __wcstold_l(const wchar_t *nptr, wchar_t **endptr,
+                                   locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstold_l, wchar_t, L)
 #endif  // __GLIBC__
 
 TEST(MemorySanitizer, modf) {
@@ -1629,26 +1729,35 @@
   EXPECT_NOT_POISONED(y);
 }
 
+// There's no sincos() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, sincos) {
   double s, c;
   sincos(0.2, &s, &c);
   EXPECT_NOT_POISONED(s);
   EXPECT_NOT_POISONED(c);
 }
+#endif
 
+// There's no sincosf() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, sincosf) {
   float s, c;
   sincosf(0.2, &s, &c);
   EXPECT_NOT_POISONED(s);
   EXPECT_NOT_POISONED(c);
 }
+#endif
 
+// There's no sincosl() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, sincosl) {
   long double s, c;
   sincosl(0.2, &s, &c);
   EXPECT_NOT_POISONED(s);
   EXPECT_NOT_POISONED(c);
 }
+#endif
 
 TEST(MemorySanitizer, remquo) {
   int quo;
@@ -1703,13 +1812,18 @@
   EXPECT_NOT_POISONED(sgn);
 }
 
+// There's no lgammal_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, lgammal_r) {
   int sgn;
   long double res = lgammal_r(1.1, &sgn);
   ASSERT_NE(0.0, res);
   EXPECT_NOT_POISONED(sgn);
 }
+#endif
 
+// There's no drand48_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, drand48_r) {
   struct drand48_data buf;
   srand48_r(0, &buf);
@@ -1717,7 +1831,10 @@
   drand48_r(&buf, &d);
   EXPECT_NOT_POISONED(d);
 }
+#endif
 
+// There's no lrand48_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, lrand48_r) {
   struct drand48_data buf;
   srand48_r(0, &buf);
@@ -1725,6 +1842,7 @@
   lrand48_r(&buf, &d);
   EXPECT_NOT_POISONED(d);
 }
+#endif
 
 TEST(MemorySanitizer, sprintf) {  // NOLINT
   char buff[10];
@@ -1834,6 +1952,16 @@
   EXPECT_POISONED(buff[2]);
 }
 
+TEST(MemorySanitizer, wmemset) {
+    wchar_t x[25];
+    break_optimization(x);
+    EXPECT_POISONED(x[0]);
+    wmemset(x, L'A', 10);
+    EXPECT_EQ(x[0], L'A');
+    EXPECT_EQ(x[9], L'A');
+    EXPECT_POISONED(x[10]);
+}
+
 TEST(MemorySanitizer, mbtowc) {
   const char *x = "abc";
   wchar_t wx;
@@ -1979,6 +2107,8 @@
   EXPECT_NE(0U, strlen(time.tm_zone));
 }
 
+// There's no getmntent() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, getmntent) {
   FILE *fp = setmntent("/etc/fstab", "r");
   struct mntent *mnt = getmntent(fp);
@@ -1991,7 +2121,10 @@
   EXPECT_NOT_POISONED(mnt->mnt_passno);
   fclose(fp);
 }
+#endif
 
+// There's no getmntent_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, getmntent_r) {
   FILE *fp = setmntent("/etc/fstab", "r");
   struct mntent mntbuf;
@@ -2006,6 +2139,7 @@
   EXPECT_NOT_POISONED(mnt->mnt_passno);
   fclose(fp);
 }
+#endif
 
 TEST(MemorySanitizer, ether) {
   const char *asc = "11:22:33:44:55:66";
@@ -2045,6 +2179,8 @@
   }
 }
 
+// There's no fcvt() on FreeBSD.
+#if !defined(__FreeBSD__)
 // FIXME: enable and add ecvt.
 // FIXME: check why msandr does not handle fcvt.
 TEST(MemorySanitizer, fcvt) {
@@ -2060,7 +2196,10 @@
   EXPECT_NOT_POISONED(str[0]);
   ASSERT_NE(0U, strlen(str));
 }
+#endif
 
+// There's no fcvt_long() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, fcvt_long) {
   int a, b;
   break_optimization(&a);
@@ -2074,7 +2213,7 @@
   EXPECT_NOT_POISONED(str[0]);
   ASSERT_NE(0U, strlen(str));
 }
-
+#endif
 
 TEST(MemorySanitizer, memchr) {
   char x[10];
@@ -2676,9 +2815,20 @@
   EXPECT_NOT_POISONED(usage.ru_nivcsw);
 }
 
-#ifdef __GLIBC__
-extern char *program_invocation_name;
-#else  // __GLIBC__
+#if defined(__FreeBSD__)
+static void GetProgramPath(char *buf, size_t sz) {
+  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+  int res = sysctl(mib, 4, buf, &sz, NULL, 0);
+  ASSERT_EQ(0, res);
+}
+#elif defined(__GLIBC__)
+static void GetProgramPath(char *buf, size_t sz) {
+  extern char *program_invocation_name;
+  int res = snprintf(buf, sz, "%s", program_invocation_name);
+  ASSERT_GE(res, 0);
+  ASSERT_LT((size_t)res, sz);
+}
+#else
 # error "TODO: port this"
 #endif
 
@@ -2713,21 +2863,29 @@
 
 // Compute the path to our loadable DSO.  We assume it's in the same
 // directory.  Only use string routines that we intercept so far to do this.
-static int PathToLoadable(char *buf, size_t sz) {
-  const char *basename = "libmsan_loadable.x86_64.so";
-  char *argv0 = program_invocation_name;
-  char *last_slash = strrchr(argv0, '/');
-  assert(last_slash);
-  int res =
-      snprintf(buf, sz, "%.*s/%s", int(last_slash - argv0), argv0, basename);
-  assert(res >= 0);
-  return (size_t)res < sz ? 0 : res;
+static void GetPathToLoadable(char *buf, size_t sz) {
+  char program_path[kMaxPathLength];
+  GetProgramPath(program_path, sizeof(program_path));
+
+  const char *last_slash = strrchr(program_path, '/');
+  ASSERT_NE(nullptr, last_slash);
+  size_t dir_len = (size_t)(last_slash - program_path);
+#if defined(__x86_64__)
+  static const char basename[] = "libmsan_loadable.x86_64.so";
+#elif defined(__MIPSEB__) || defined(MIPSEB)
+  static const char basename[] = "libmsan_loadable.mips64.so";
+#elif defined(__mips64)
+  static const char basename[] = "libmsan_loadable.mips64el.so";
+#endif
+  int res = snprintf(buf, sz, "%.*s/%s",
+                     (int)dir_len, program_path, basename);
+  ASSERT_GE(res, 0);
+  ASSERT_LT((size_t)res, sz);
 }
 
 TEST(MemorySanitizer, dl_iterate_phdr) {
-  char path[4096];
-  int res = PathToLoadable(path, sizeof(path));
-  ASSERT_EQ(0, res);
+  char path[kMaxPathLength];
+  GetPathToLoadable(path, sizeof(path));
 
   // Having at least one dlopen'ed library in the process makes this more
   // entertaining.
@@ -2737,15 +2895,13 @@
   int count = 0;
   int result = dl_iterate_phdr(dl_phdr_callback, &count);
   ASSERT_GT(count, 0);
-  
+
   dlclose(lib);
 }
 
-
 TEST(MemorySanitizer, dlopen) {
-  char path[4096];
-  int res = PathToLoadable(path, sizeof(path));
-  ASSERT_EQ(0, res);
+  char path[kMaxPathLength];
+  GetPathToLoadable(path, sizeof(path));
 
   // We need to clear shadow for globals when doing dlopen.  In order to test
   // this, we have to poison the shadow for the DSO before we load it.  In
@@ -2770,19 +2926,22 @@
 
 // Regression test for a crash in dlopen() interceptor.
 TEST(MemorySanitizer, dlopenFailed) {
-  const char *path = "/libmsan_loadable_does_not_exist.x86_64.so";
+  const char *path = "/libmsan_loadable_does_not_exist.so";
   void *lib = dlopen(path, RTLD_LAZY);
   ASSERT_TRUE(lib == NULL);
 }
 
 #endif // MSAN_TEST_DISABLE_DLOPEN
 
+// There's no sched_getaffinity() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, sched_getaffinity) {
   cpu_set_t mask;
   int res = sched_getaffinity(getpid(), sizeof(mask), &mask);
   ASSERT_EQ(0, res);
   EXPECT_NOT_POISONED(mask);
 }
+#endif
 
 TEST(MemorySanitizer, scanf) {
   const char *input = "42 hello";
@@ -3012,11 +3171,14 @@
   free(p);
 }
 
+// There's no memalign() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, memalign) {
   void *p = memalign(4096, 13);
   EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
   free(p);
 }
+#endif
 
 TEST(MemorySanitizer, valloc) {
   void *a = valloc(100);
@@ -3024,6 +3186,8 @@
   free(a);
 }
 
+// There's no pvalloc() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, pvalloc) {
   void *p = pvalloc(kPageSize + 100);
   EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
@@ -3035,6 +3199,7 @@
   EXPECT_EQ(kPageSize, __sanitizer_get_allocated_size(p));
   free(p);
 }
+#endif
 
 TEST(MemorySanitizer, inet_pton) {
   const char *s = "1:0:0:0:0:0:0:8";
@@ -3078,12 +3243,15 @@
   EXPECT_NOT_POISONED(strlen(buf));
 }
 
+// There's no sysinfo() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, sysinfo) {
   struct sysinfo info;
   int res = sysinfo(&info);
   ASSERT_EQ(0, res);
   EXPECT_NOT_POISONED(info);
 }
+#endif
 
 TEST(MemorySanitizer, getpwuid) {
   struct passwd *p = getpwuid(0); // root
@@ -3138,8 +3306,10 @@
   struct group grp;
   struct group *grpres;
   char buf[10000];
-  int res = getgrnam_r("root", &grp, buf, sizeof(buf), &grpres);
+  int res = getgrnam_r(SUPERUSER_GROUP, &grp, buf, sizeof(buf), &grpres);
   ASSERT_EQ(0, res);
+  // Note that getgrnam_r() returns 0 if the matching group is not found.
+  ASSERT_NE(nullptr, grpres);
   EXPECT_NOT_POISONED(grp.gr_name);
   ASSERT_TRUE(grp.gr_name != NULL);
   EXPECT_NOT_POISONED(grp.gr_name[0]);
@@ -3171,6 +3341,8 @@
   EXPECT_NOT_POISONED(pwdres);
 }
 
+// There's no fgetpwent() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, fgetpwent) {
   FILE *fp = fopen("/etc/passwd", "r");
   struct passwd *p = fgetpwent(fp);
@@ -3181,6 +3353,7 @@
   EXPECT_NOT_POISONED(p->pw_uid);
   fclose(fp);
 }
+#endif
 
 TEST(MemorySanitizer, getgrent) {
   setgrent();
@@ -3192,6 +3365,8 @@
   EXPECT_NOT_POISONED(p->gr_gid);
 }
 
+// There's no fgetgrent() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, fgetgrent) {
   FILE *fp = fopen("/etc/group", "r");
   struct group *grp = fgetgrent(fp);
@@ -3206,6 +3381,7 @@
   }
   fclose(fp);
 }
+#endif
 
 TEST(MemorySanitizer, getgrent_r) {
   struct group grp;
@@ -3221,6 +3397,8 @@
   EXPECT_NOT_POISONED(grpres);
 }
 
+// There's no fgetgrent_r() on FreeBSD.
+#if !defined(__FreeBSD__)
 TEST(MemorySanitizer, fgetgrent_r) {
   FILE *fp = fopen("/etc/group", "r");
   struct group grp;
@@ -3236,6 +3414,7 @@
   EXPECT_NOT_POISONED(grpres);
   fclose(fp);
 }
+#endif
 
 TEST(MemorySanitizer, getgroups) {
   int n = getgroups(0, 0);
@@ -3363,7 +3542,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedLoad) {
-  char x[32];
+  char x[32] __attribute__((aligned(8)));
   U4 origin = __LINE__;
   for (unsigned i = 0; i < sizeof(x) / 4; ++i)
     __msan_set_origin(x + 4 * i, 4, origin + i);
@@ -3397,7 +3576,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore16) {
-  char x[5];
+  char x[5] __attribute__((aligned(4)));
   U2 y2 = 0;
   U4 origin = __LINE__;
   __msan_poison(&y2, 1);
@@ -3408,11 +3587,10 @@
   EXPECT_POISONED_O(x[1], origin);
   EXPECT_NOT_POISONED(x[2]);
   EXPECT_POISONED_O(x[3], origin);
-  EXPECT_POISONED_O(x[4], origin);
 }
 
 TEST(MemorySanitizer, UnalignedStore32) {
-  char x[8];
+  char x[8] __attribute__((aligned(4)));
   U4 y4 = 0;
   U4 origin = __LINE__;
   __msan_poison(&y4, 2);
@@ -3430,7 +3608,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore64) {
-  char x[16];
+  char x[16] __attribute__((aligned(8)));
   U8 y8 = 0;
   U4 origin = __LINE__;
   __msan_poison(&y8, 3);
@@ -3453,7 +3631,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore16_precise) {
-  char x[8];
+  char x[8] __attribute__((aligned(4)));
   U2 y = 0;
   U4 originx1 = __LINE__;
   U4 originx2 = __LINE__;
@@ -3476,7 +3654,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore16_precise2) {
-  char x[8];
+  char x[8] __attribute__((aligned(4)));
   U2 y = 0;
   U4 originx1 = __LINE__;
   U4 originx2 = __LINE__;
@@ -3499,7 +3677,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore64_precise) {
-  char x[12];
+  char x[12] __attribute__((aligned(8)));
   U8 y = 0;
   U4 originx1 = __LINE__;
   U4 originx2 = __LINE__;
@@ -3531,7 +3709,7 @@
 }
 
 TEST(MemorySanitizer, UnalignedStore64_precise2) {
-  char x[12];
+  char x[12] __attribute__((aligned(8)));
   U8 y = 0;
   U4 originx1 = __LINE__;
   U4 originx2 = __LINE__;
@@ -3561,7 +3739,7 @@
   EXPECT_POISONED_O(x[11], originx3);
 }
 
-#if defined(__clang__)
+#if (defined(__x86_64__) && defined(__clang__))
 namespace {
 typedef U1 V16x8 __attribute__((__vector_size__(16)));
 typedef U2 V8x16 __attribute__((__vector_size__(16)));
@@ -3725,15 +3903,15 @@
 #endif  // defined(__clang__)
 
 TEST(MemorySanitizerOrigins, SetGet) {
-  EXPECT_EQ(TrackingOrigins(), __msan_get_track_origins());
+  EXPECT_EQ(TrackingOrigins(), !!__msan_get_track_origins());
   if (!TrackingOrigins()) return;
   int x;
   __msan_set_origin(&x, sizeof(x), 1234);
-  EXPECT_EQ(1234U, __msan_get_origin(&x));
+  EXPECT_ORIGIN(1234U, __msan_get_origin(&x));
   __msan_set_origin(&x, sizeof(x), 5678);
-  EXPECT_EQ(5678U, __msan_get_origin(&x));
+  EXPECT_ORIGIN(5678U, __msan_get_origin(&x));
   __msan_set_origin(&x, sizeof(x), 0);
-  EXPECT_EQ(0U, __msan_get_origin(&x));
+  EXPECT_ORIGIN(0U, __msan_get_origin(&x));
 }
 
 namespace {
@@ -3743,19 +3921,18 @@
   U2 b;
 };
 
-// http://code.google.com/p/memory-sanitizer/issues/detail?id=6
-TEST(MemorySanitizerOrigins, DISABLED_InitializedStoreDoesNotChangeOrigin) {
+TEST(MemorySanitizerOrigins, InitializedStoreDoesNotChangeOrigin) {
   if (!TrackingOrigins()) return;
 
   S s;
   U4 origin = rand();  // NOLINT
   s.a = *GetPoisonedO<U2>(0, origin);
-  EXPECT_EQ(origin, __msan_get_origin(&s.a));
-  EXPECT_EQ(origin, __msan_get_origin(&s.b));
+  EXPECT_ORIGIN(origin, __msan_get_origin(&s.a));
+  EXPECT_ORIGIN(origin, __msan_get_origin(&s.b));
 
   s.b = 42;
-  EXPECT_EQ(origin, __msan_get_origin(&s.a));
-  EXPECT_EQ(origin, __msan_get_origin(&s.b));
+  EXPECT_ORIGIN(origin, __msan_get_origin(&s.a));
+  EXPECT_ORIGIN(origin, __msan_get_origin(&s.b));
 }
 }  // namespace
 
@@ -3771,7 +3948,8 @@
   *z = op(*x, *y);
   U4 origin = __msan_get_origin(z);
   EXPECT_POISONED_O(*z, origin);
-  EXPECT_EQ(true, origin == ox || origin == oy);
+  EXPECT_EQ(true, __msan_origin_is_descendant_or_same(origin, ox) ||
+                      __msan_origin_is_descendant_or_same(origin, oy));
 
   // y is poisoned, x is not.
   *x = 10101;
@@ -3780,7 +3958,7 @@
   __msan_set_origin(z, sizeof(*z), 0);
   *z = op(*x, *y);
   EXPECT_POISONED_O(*z, oy);
-  EXPECT_EQ(__msan_get_origin(z), oy);
+  EXPECT_ORIGIN(oy, __msan_get_origin(z));
 
   // x is poisoned, y is not.
   *x = *GetPoisonedO<T>(0, ox);
@@ -3789,7 +3967,7 @@
   __msan_set_origin(z, sizeof(*z), 0);
   *z = op(*x, *y);
   EXPECT_POISONED_O(*z, ox);
-  EXPECT_EQ(__msan_get_origin(z), ox);
+  EXPECT_ORIGIN(ox, __msan_get_origin(z));
 }
 
 template<class T> INLINE T XOR(const T &a, const T&b) { return a ^ b; }
@@ -4081,7 +4259,8 @@
 
   // Allocate the page that was released to the OS in free() with the real mmap,
   // bypassing the interceptor.
-  char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+  char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE,
+                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE((char *)0, q);
 
   ASSERT_TRUE(q <= p);
diff --git a/lib/msan/tests/msandr_test_so.cc b/lib/msan/tests/msandr_test_so.cc
deleted file mode 100644
index eb605d4..0000000
--- a/lib/msan/tests/msandr_test_so.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- msandr_test_so.cc  ------------------------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// MemorySanitizer unit tests.
-//===----------------------------------------------------------------------===//
-
-#include "msandr_test_so.h"
-
-void dso_memfill(char* s, unsigned n) {
-  for (unsigned i = 0; i < n; ++i)
-    s[i] = i;
-}
-
-int dso_callfn(int (*fn)(void)) {
-  volatile int x = fn();
-  return x;
-}
-
-int dso_callfn1(int (*fn)(long long, long long, long long)) {  //NOLINT
-  volatile int x = fn(1, 2, 3);
-  return x;
-}
-
-int dso_stack_store(void (*fn)(int*, int*), int x) {
-  int y = x + 1;
-  fn(&x, &y);
-  return y;
-}
-
-void break_optimization(void *x) {}
diff --git a/lib/msan/tests/msandr_test_so.h b/lib/msan/tests/msandr_test_so.h
deleted file mode 100644
index cd75ff3..0000000
--- a/lib/msan/tests/msandr_test_so.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- msandr_test_so.h ----------------------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// MemorySanitizer unit tests.
-//===----------------------------------------------------------------------===//
-
-#ifndef MSANDR_MSANDR_TEST_SO_H
-#define MSANDR_MSANDR_TEST_SO_H
-
-void dso_memfill(char* s, unsigned n);
-int dso_callfn(int (*fn)(void));
-int dso_callfn1(int (*fn)(long long, long long, long long));  //NOLINT
-int dso_stack_store(void (*fn)(int*, int*), int x);
-void break_optimization(void *x);
-
-#endif
diff --git a/lib/profile/InstrProfiling.h b/lib/profile/InstrProfiling.h
index a086f3d..2b1bd00 100644
--- a/lib/profile/InstrProfiling.h
+++ b/lib/profile/InstrProfiling.h
@@ -57,9 +57,6 @@
 uint64_t *__llvm_profile_begin_counters(void);
 uint64_t *__llvm_profile_end_counters(void);
 
-#define PROFILE_RANGE_SIZE(Range) \
-  (__llvm_profile_end_ ## Range () - __llvm_profile_begin_ ## Range ())
-
 /*!
  * \brief Write instrumentation data to the current file.
  *
diff --git a/lib/profile/InstrProfilingBuffer.c b/lib/profile/InstrProfilingBuffer.c
index 3351b07..3c429c8 100644
--- a/lib/profile/InstrProfilingBuffer.c
+++ b/lib/profile/InstrProfilingBuffer.c
@@ -8,17 +8,38 @@
 \*===----------------------------------------------------------------------===*/
 
 #include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
 #include <string.h>
 
 __attribute__((visibility("hidden")))
 uint64_t __llvm_profile_get_size_for_buffer(void) {
+  const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+  const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+  const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+  const uint64_t *CountersEnd = __llvm_profile_end_counters();
+  const char *NamesBegin = __llvm_profile_begin_names();
+  const char *NamesEnd = __llvm_profile_end_names();
+
+  return __llvm_profile_get_size_for_buffer_internal(
+      DataBegin, DataEnd, CountersBegin, CountersEnd, NamesBegin, NamesEnd);
+}
+
+#define PROFILE_RANGE_SIZE(Range) (Range##End - Range##Begin)
+
+__attribute__((visibility("hidden")))
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+        const __llvm_profile_data *DataBegin,
+        const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+        const uint64_t *CountersEnd, const char *NamesBegin,
+        const char *NamesEnd) {
   /* Match logic in __llvm_profile_write_buffer(). */
-  const uint64_t NamesSize = PROFILE_RANGE_SIZE(names) * sizeof(char);
+  const uint64_t NamesSize = PROFILE_RANGE_SIZE(Names) * sizeof(char);
   const uint64_t Padding = sizeof(uint64_t) - NamesSize % sizeof(uint64_t);
   return sizeof(uint64_t) * PROFILE_HEADER_SIZE +
-     PROFILE_RANGE_SIZE(data) * sizeof(__llvm_profile_data) +
-     PROFILE_RANGE_SIZE(counters) * sizeof(uint64_t) +
-     NamesSize + Padding;
+      PROFILE_RANGE_SIZE(Data) * sizeof(__llvm_profile_data) +
+      PROFILE_RANGE_SIZE(Counters) * sizeof(uint64_t) +
+      NamesSize + Padding;
 }
 
 __attribute__((visibility("hidden")))
@@ -33,6 +54,20 @@
   const char *NamesBegin = __llvm_profile_begin_names();
   const char *NamesEnd   = __llvm_profile_end_names();
 
+  return __llvm_profile_write_buffer_internal(Buffer, DataBegin, DataEnd,
+                                              CountersBegin, CountersEnd,
+                                              NamesBegin, NamesEnd);
+}
+
+__attribute__((visibility("hidden")))
+int __llvm_profile_write_buffer_internal(
+    char *Buffer, const __llvm_profile_data *DataBegin,
+    const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+    const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+  /* Match logic in __llvm_profile_get_size_for_buffer().
+   * Match logic in __llvm_profile_write_file().
+   */
+
   /* Calculate size of sections. */
   const uint64_t DataSize = DataEnd - DataBegin;
   const uint64_t CountersSize = CountersEnd - CountersBegin;
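
The size computation above pads the names section up to the next uint64_t boundary. A minimal standalone sketch of the same arithmetic, with made-up section sizes and a placeholder header-word count (PROFILE_HEADER_SIZE is not reproduced here):

#include <cstdint>
#include <cstdio>

// Mirrors __llvm_profile_get_size_for_buffer_internal(): the names section is
// padded up to a uint64_t boundary. Note that, as written, a full extra word
// is added when NamesSize is already a multiple of sizeof(uint64_t).
static uint64_t BufferSize(uint64_t HeaderWords, uint64_t NumData,
                           uint64_t DataElemSize, uint64_t NumCounters,
                           uint64_t NamesSize) {
  uint64_t Padding = sizeof(uint64_t) - NamesSize % sizeof(uint64_t);
  return sizeof(uint64_t) * HeaderWords + NumData * DataElemSize +
         NumCounters * sizeof(uint64_t) + NamesSize + Padding;
}

int main() {
  // 13 bytes of names get 3 bytes of padding, so names contribute 16 bytes.
  printf("%llu\n", (unsigned long long)BufferSize(7, 4, 48, 10, 13));
  return 0;
}
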
diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c
index 5aef390..daa3094 100644
--- a/lib/profile/InstrProfilingFile.c
+++ b/lib/profile/InstrProfilingFile.c
@@ -11,6 +11,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/errno.h>
 
 #define UNCONST(ptr) ((void *)(uintptr_t)(ptr))
 
@@ -84,12 +85,15 @@
 }
 
 static void truncateCurrentFile(void) {
-  const char *Filename = __llvm_profile_CurrentFilename;
+  const char *Filename;
+  FILE *File;
+
+  Filename = __llvm_profile_CurrentFilename;
   if (!Filename || !Filename[0])
     return;
 
   /* Truncate the file.  Later we'll reopen and append. */
-  FILE *File = fopen(Filename, "w");
+  File = fopen(Filename, "w");
   if (!File)
     return;
   fclose(File);
@@ -100,15 +104,16 @@
 int getpid(void);
 static int setFilenameFromEnvironment(void) {
   const char *Filename = getenv("LLVM_PROFILE_FILE");
+#define MAX_PID_SIZE 16
+  char PidChars[MAX_PID_SIZE] = {0};
+  int NumPids = 0, PidLength = 0;
+  char *Allocated;
+  int I, J;
+
   if (!Filename || !Filename[0])
     return -1;
 
   /* Check the filename for "%p", which indicates a pid-substitution. */
-#define MAX_PID_SIZE 16
-  char PidChars[MAX_PID_SIZE] = {0};
-  int NumPids = 0;
-  int PidLength = 0;
-  int I;
   for (I = 0; Filename[I]; ++I)
     if (Filename[I] == '%' && Filename[++I] == 'p')
       if (!NumPids++) {
@@ -122,12 +127,11 @@
   }
 
   /* Allocate enough space for the substituted filename. */
-  char *Allocated = (char*)malloc(I + NumPids*(PidLength - 2) + 1);
+  Allocated = malloc(I + NumPids*(PidLength - 2) + 1);
   if (!Allocated)
     return -1;
 
   /* Construct the new filename. */
-  int J;
   for (I = 0, J = 0; Filename[I]; ++I)
     if (Filename[I] == '%') {
       if (Filename[++I] == 'p') {
@@ -170,12 +174,18 @@
 
 __attribute__((visibility("hidden")))
 int __llvm_profile_write_file(void) {
+  int rc;
+
   /* Check the filename. */
   if (!__llvm_profile_CurrentFilename)
     return -1;
 
   /* Write the file. */
-  return writeFileWithName(__llvm_profile_CurrentFilename);
+  rc = writeFileWithName(__llvm_profile_CurrentFilename);
+  if (rc && getenv("LLVM_PROFILE_VERBOSE_ERRORS"))
+    fprintf(stderr, "LLVM Profile: Failed to write file \"%s\": %s\n",
+            __llvm_profile_CurrentFilename, strerror(errno));
+  return rc;
 }
 
 static void writeFileWithoutReturn(void) {
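
The rewritten setFilenameFromEnvironment() sizes the substituted filename as I + NumPids*(PidLength - 2) + 1: each two-character "%p" is replaced by PidLength digits, plus one byte for the terminator. A small sketch of that arithmetic with a hypothetical pattern and PID (not values used by the runtime):

#include <cstdio>

int main() {
  const char *Pattern = "prof-%p.raw";  // hypothetical LLVM_PROFILE_FILE value
  char PidChars[16];
  int PidLength = snprintf(PidChars, sizeof(PidChars), "%d", 12345);  // 5 digits
  int NumPids = 0;
  int I;
  for (I = 0; Pattern[I]; ++I)
    if (Pattern[I] == '%' && Pattern[++I] == 'p')
      ++NumPids;
  // Each "%p" (2 chars) becomes PidLength digits; +1 for the trailing NUL.
  int Needed = I + NumPids * (PidLength - 2) + 1;
  printf("need %d bytes\n", Needed);  // 15 here: "prof-12345.raw" plus NUL
  return 0;
}
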
diff --git a/lib/profile/InstrProfilingInternal.h b/lib/profile/InstrProfilingInternal.h
new file mode 100644
index 0000000..ede39cd
--- /dev/null
+++ b/lib/profile/InstrProfilingInternal.h
@@ -0,0 +1,40 @@
+/*===- InstrProfilingInternal.h - Support library for PGO instrumentation -===*\
+|*
+|*                     The LLVM Compiler Infrastructure
+|*
+|* This file is distributed under the University of Illinois Open Source
+|* License. See LICENSE.TXT for details.
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILING_INTERNALH_
+#define PROFILE_INSTRPROFILING_INTERNALH_
+
+#include "InstrProfiling.h"
+
+/*!
+ * \brief Compute the size needed to hold the instrumentation data, given
+ * explicit pointers to the live data in memory.  This is probably not what you
+ * want.  Use __llvm_profile_get_size_for_buffer instead.  Use this function if
+ * your program has a custom memory layout.
+ */
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+    const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
+    const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+    const char *NamesBegin, const char *NamesEnd);
+
+/*!
+ * \brief Write instrumentation data to the given buffer, given explicit
+ * pointers to the live data in memory.  This function is probably not what you
+ * want.  Use __llvm_profile_write_buffer instead.  Use this function if your
+ * program has a custom memory layout.
+ *
+ * \pre \c Buffer is the start of a buffer at least as big as \a
+ * __llvm_profile_get_size_for_buffer_internal().
+ */
+int __llvm_profile_write_buffer_internal(
+    char *Buffer, const __llvm_profile_data *DataBegin,
+    const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+    const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+
+#endif
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index fe4418c..6eb6ca8 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -7,6 +7,7 @@
   sanitizer_deadlock_detector1.cc
   sanitizer_deadlock_detector2.cc
   sanitizer_flags.cc
+  sanitizer_flag_parser.cc
   sanitizer_libc.cc
   sanitizer_libignore.cc
   sanitizer_linux.cc
@@ -63,7 +64,10 @@
   sanitizer_common_syscalls.inc
   sanitizer_deadlock_detector.h
   sanitizer_deadlock_detector_interface.h
+  sanitizer_flag_parser.h
   sanitizer_flags.h
+  sanitizer_flags.inc
+  sanitizer_interface_internal.h
   sanitizer_internal_defs.h
   sanitizer_lfstack.h
   sanitizer_libc.h
@@ -105,11 +109,10 @@
 set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
 append_no_rtti_flag(SANITIZER_CFLAGS)
 
-# Stack frames on PowerPC are much larger than anticipated.
-if(NOT ${LLVM_NATIVE_ARCH} STREQUAL "PowerPC")
-  append_list_if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG -Wframe-larger-than=512 SANITIZER_CFLAGS)
-endif()
-append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors SANITIZER_CFLAGS)
+append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=512
+               SANITIZER_CFLAGS)
+append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
+               SANITIZER_CFLAGS)
 
 add_custom_target(sanitizer_common)
 set(SANITIZER_RUNTIME_LIBRARIES)
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 47509f8..03b3e83 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -14,7 +14,6 @@
 #include "sanitizer_allocator.h"
 #include "sanitizer_allocator_internal.h"
 #include "sanitizer_common.h"
-#include "sanitizer_flags.h"
 
 namespace __sanitizer {
 
@@ -61,7 +60,7 @@
     SpinMutexLock l(&internal_alloc_init_mu);
     if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
         0) {
-      internal_allocator_instance->Init();
+      internal_allocator_instance->Init(/* may_return_null*/ false);
       atomic_store(&internal_allocator_initialized, 1, memory_order_release);
     }
   }
@@ -140,14 +139,12 @@
   return (max / size) < n;
 }
 
-void *AllocatorReturnNull() {
-  if (common_flags()->allocator_may_return_null)
-    return 0;
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
   CHECK(0);
-  return 0;
+  Die();
 }
 
 }  // namespace __sanitizer
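
The lazy setup of the internal allocator above uses a double-checked pattern: a load outside the lock, a second load under the lock, and a release store once Init() has run. A generic standalone sketch of the same idiom using the standard library (the runtime uses its own SpinMutex and atomics):

#include <atomic>
#include <mutex>

static std::atomic<int> g_initialized{0};
static std::mutex g_init_mu;
static int g_resource;  // stands in for internal_allocator_instance

static int *GetResource() {
  if (g_initialized.load(std::memory_order_acquire) == 0) {
    std::lock_guard<std::mutex> l(g_init_mu);
    if (g_initialized.load(std::memory_order_relaxed) == 0) {
      g_resource = 42;  // one-time expensive setup would go here
      g_initialized.store(1, std::memory_order_release);
    }
  }
  return &g_resource;
}

int main() { return *GetResource() == 42 ? 0 : 1; }
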
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 2321801..b5105f8 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -23,8 +23,8 @@
 
 namespace __sanitizer {
 
-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();
 
 // SizeClassMap maps allocation sizes into size classes and back.
 // Class 0 corresponds to size 0.
@@ -211,6 +211,7 @@
   void Init() {
     internal_memset(this, 0, sizeof(*this));
   }
+  void InitLinkerInitialized() {}
 
   void Add(AllocatorStat i, uptr v) {
     v += atomic_load(&stats_[i], memory_order_relaxed);
@@ -240,11 +241,14 @@
 // Global stats, used for aggregation and querying.
 class AllocatorGlobalStats : public AllocatorStats {
  public:
-  void Init() {
-    internal_memset(this, 0, sizeof(*this));
+  void InitLinkerInitialized() {
     next_ = this;
     prev_ = this;
   }
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+    InitLinkerInitialized();
+  }
 
   void Register(AllocatorStats *s) {
     SpinMutexLock l(&mu_);
@@ -1002,9 +1006,14 @@
 template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
-  void Init() {
-    internal_memset(this, 0, sizeof(*this));
+  void InitLinkerInitialized(bool may_return_null) {
     page_size_ = GetPageSizeCached();
+    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+  }
+
+  void Init(bool may_return_null) {
+    internal_memset(this, 0, sizeof(*this));
+    InitLinkerInitialized(may_return_null);
   }
 
   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -1012,7 +1021,9 @@
     uptr map_size = RoundUpMapSize(size);
     if (alignment > page_size_)
       map_size += alignment;
-    if (map_size < size) return AllocatorReturnNull();  // Overflow.
+    // Overflow.
+    if (map_size < size)
+      return ReturnNullOrDie();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDie(map_size, "LargeMmapAllocator"));
     CHECK(IsAligned(map_beg, page_size_));
@@ -1048,6 +1059,16 @@
     return reinterpret_cast<void*>(res);
   }
 
+  void *ReturnNullOrDie() {
+    if (atomic_load(&may_return_null_, memory_order_acquire))
+      return 0;
+    ReportAllocatorCannotReturnNull();
+  }
+
+  void SetMayReturnNull(bool may_return_null) {
+    atomic_store(&may_return_null_, may_return_null, memory_order_release);
+  }
+
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -1226,6 +1247,7 @@
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
+  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };
 
@@ -1239,19 +1261,32 @@
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void Init() {
+  void InitCommon(bool may_return_null) {
     primary_.Init();
-    secondary_.Init();
+    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+  }
+
+  void InitLinkerInitialized(bool may_return_null) {
+    secondary_.InitLinkerInitialized(may_return_null);
+    stats_.InitLinkerInitialized();
+    InitCommon(may_return_null);
+  }
+
+  void Init(bool may_return_null) {
+    secondary_.Init(may_return_null);
     stats_.Init();
+    InitCommon(may_return_null);
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false) {
+                 bool cleared = false, bool check_rss_limit = false) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return AllocatorReturnNull();
+      return ReturnNullOrDie();
+    if (check_rss_limit && RssLimitIsExceeded())
+      return ReturnNullOrDie();
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
     void *res;
@@ -1267,6 +1302,30 @@
     return res;
   }
 
+  bool MayReturnNull() const {
+    return atomic_load(&may_return_null_, memory_order_acquire);
+  }
+
+  void *ReturnNullOrDie() {
+    if (MayReturnNull())
+      return 0;
+    ReportAllocatorCannotReturnNull();
+  }
+
+  void SetMayReturnNull(bool may_return_null) {
+    secondary_.SetMayReturnNull(may_return_null);
+    atomic_store(&may_return_null_, may_return_null, memory_order_release);
+  }
+
+  bool RssLimitIsExceeded() {
+    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+  }
+
+  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+                 memory_order_release);
+  }
+
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
@@ -1379,6 +1438,8 @@
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
+  atomic_uint8_t may_return_null_;
+  atomic_uint8_t rss_limit_is_exceeded_;
 };
 
 // Returns true if calloc(size, n) should return 0 due to overflow in size*n.
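
The allocators now carry a may_return_null flag read with acquire ordering; ReturnNullOrDie() either returns null or reports and terminates. A minimal standalone sketch of that policy switch, with placeholder reporting and malloc standing in for the real allocation path:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static std::atomic<bool> g_may_return_null{false};

[[noreturn]] static void DieOnAllocFailure() {
  fprintf(stderr, "allocator is terminating instead of returning 0\n");
  abort();
}

static void *ReturnNullOrDie() {
  if (g_may_return_null.load(std::memory_order_acquire))
    return nullptr;
  DieOnAllocFailure();
}

static void *AllocateOrPolicy(size_t size, size_t alignment) {
  if (size + alignment < size)  // overflow check, as in Allocate() above
    return ReturnNullOrDie();
  return malloc(size);          // placeholder for the real allocation
}

int main() {
  g_may_return_null.store(true, std::memory_order_release);
  void *p = AllocateOrPolicy((size_t)-1, 64);  // overflows, so returns null
  return p == nullptr ? 0 : 1;
}
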
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index 4409fd6..9b9cfd0 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -49,6 +49,15 @@
 void InternalFree(void *p, InternalAllocatorCache *cache = 0);
 InternalAllocator *internal_allocator();
 
+enum InternalAllocEnum {
+  INTERNAL_ALLOC
+};
+
 }  // namespace __sanitizer
 
+inline void *operator new(__sanitizer::operator_new_size_type size,
+                          InternalAllocEnum) {
+  return InternalAlloc(size);
+}
+
 #endif  // SANITIZER_ALLOCATOR_INTERNAL_H
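
The new placement-form operator new keyed on InternalAllocEnum lets the runtime construct objects in memory obtained from InternalAlloc() without touching the global operator new. A standalone sketch of the idiom with malloc standing in for InternalAlloc (the enum and overload are redefined here so the snippet compiles on its own):

#include <cstddef>
#include <cstdlib>

enum InternalAllocEnum { INTERNAL_ALLOC };

inline void *operator new(std::size_t size, InternalAllocEnum) {
  return malloc(size);  // the runtime returns InternalAlloc(size) here
}

struct Node {
  int value;
  explicit Node(int v) : value(v) {}
};

int main() {
  Node *n = new (INTERNAL_ALLOC) Node(7);  // routed through the tag overload
  int ok = (n->value == 7);
  n->~Node();
  free(n);  // the runtime would pair this with InternalFree()
  return ok ? 0 : 1;
}
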
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index c77e50e..4be3c7a 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -12,13 +12,17 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
 #include "sanitizer_flags.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
 
 namespace __sanitizer {
 
 const char *SanitizerToolName = "SanitizerTool";
 
+atomic_uint32_t current_verbosity;
+
 uptr GetPageSizeCached() {
   static uptr PageSize;
   if (!PageSize)
@@ -26,19 +30,66 @@
   return PageSize;
 }
 
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
 
-// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
-// isn't equal to the current PID, try to obtain file descriptor by opening
-// file "report_path_prefix.<PID>".
-fd_t report_fd = kStderrFd;
+void RawWrite(const char *buffer) {
+  report_file.Write(buffer, internal_strlen(buffer));
+}
 
-// Set via __sanitizer_set_report_path.
-bool log_to_file = false;
-char report_path_prefix[sizeof(report_path_prefix)];
+void ReportFile::ReopenIfNecessary() {
+  mu->CheckLocked();
+  if (fd == kStdoutFd || fd == kStderrFd) return;
 
-// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
-// child thread will be different from |report_fd_pid|.
-uptr report_fd_pid = 0;
+  uptr pid = internal_getpid();
+  // If in tracer, use the parent's file.
+  if (pid == stoptheworld_tracer_pid)
+    pid = stoptheworld_tracer_ppid;
+  if (fd != kInvalidFd) {
+    // If the report file is already opened by the current process,
+    // do nothing. Otherwise the report file was opened by the parent
+    // process, close it now.
+    if (fd_pid == pid)
+      return;
+    else
+      internal_close(fd);
+  }
+
+  internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+  uptr openrv = OpenFile(full_path, true);
+  if (internal_iserror(openrv)) {
+    const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+    internal_write(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+    internal_write(kStderrFd, full_path, internal_strlen(full_path));
+    Die();
+  }
+  fd = openrv;
+  fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+  if (!path)
+    return;
+  uptr len = internal_strlen(path);
+  if (len > sizeof(path_prefix) - 100) {
+    Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+           path[0], path[1], path[2], path[3],
+           path[4], path[5], path[6], path[7]);
+    Die();
+  }
+
+  SpinMutexLock l(mu);
+  if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+    internal_close(fd);
+  fd = kInvalidFd;
+  if (internal_strcmp(path, "stdout") == 0) {
+    fd = kStdoutFd;
+  } else if (internal_strcmp(path, "stderr") == 0) {
+    fd = kStderrFd;
+  } else {
+    internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+  }
+}
 
 // PID of the tracer task in StopTheWorld. It shares the address space with the
 // main process, but has a different PID and thus requires special handling.
@@ -47,19 +98,23 @@
 // writing to the same log file.
 uptr stoptheworld_tracer_ppid = 0;
 
-static DieCallbackType DieCallback;
+static DieCallbackType InternalDieCallback, UserDieCallback;
 void SetDieCallback(DieCallbackType callback) {
-  DieCallback = callback;
+  InternalDieCallback = callback;
+}
+void SetUserDieCallback(DieCallbackType callback) {
+  UserDieCallback = callback;
 }
 
 DieCallbackType GetDieCallback() {
-  return DieCallback;
+  return InternalDieCallback;
 }
 
 void NORETURN Die() {
-  if (DieCallback) {
-    DieCallback();
-  }
+  if (UserDieCallback)
+    UserDieCallback();
+  if (InternalDieCallback)
+    InternalDieCallback();
   internal__exit(1);
 }
 
@@ -78,8 +133,8 @@
   Die();
 }
 
-uptr ReadFileToBuffer(const char *file_name, char **buff,
-                      uptr *buff_size, uptr max_len) {
+uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+                      uptr max_len, int *errno_p) {
   uptr PageSize = GetPageSizeCached();
   uptr kMinFileLen = PageSize;
   uptr read_len = 0;
@@ -88,7 +143,7 @@
   // The files we usually open are not seekable, so try different buffer sizes.
   for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
     uptr openrv = OpenFile(file_name, /*write*/ false);
-    if (internal_iserror(openrv)) return 0;
+    if (internal_iserror(openrv, errno_p)) return 0;
     fd_t fd = openrv;
     UnmapOrDie(*buff, *buff_size);
     *buff = (char*)MmapOrDie(size, __func__);
@@ -98,6 +153,10 @@
     bool reached_eof = false;
     while (read_len + PageSize <= size) {
       uptr just_read = internal_read(fd, *buff + read_len, PageSize);
+      if (internal_iserror(just_read, errno_p)) {
+        UnmapOrDie(*buff, *buff_size);
+        return 0;
+      }
       if (just_read == 0) {
         reached_eof = true;
         break;
@@ -166,9 +225,8 @@
 void ReportErrorSummary(const char *error_message) {
   if (!common_flags()->print_summary)
     return;
-  InternalScopedBuffer<char> buff(kMaxSummaryLength);
-  internal_snprintf(buff.data(), buff.size(),
-                    "SUMMARY: %s: %s", SanitizerToolName, error_message);
+  InternalScopedString buff(kMaxSummaryLength);
+  buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
   __sanitizer_report_error_summary(buff.data());
 }
 
@@ -176,31 +234,39 @@
                         int line, const char *function) {
   if (!common_flags()->print_summary)
     return;
-  InternalScopedBuffer<char> buff(kMaxSummaryLength);
-  internal_snprintf(
-      buff.data(), buff.size(), "%s %s:%d %s", error_type,
-      file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
-      line, function ? function : "??");
+  InternalScopedString buff(kMaxSummaryLength);
+  buff.append("%s %s:%d %s", error_type,
+              file ? StripPathPrefix(file, common_flags()->strip_path_prefix)
+                   : "??",
+              line, function ? function : "??");
   ReportErrorSummary(buff.data());
 }
 
 LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
   full_name_ = internal_strdup(module_name);
   base_address_ = base_address;
-  n_ranges_ = 0;
+  ranges_.clear();
+}
+
+void LoadedModule::clear() {
+  InternalFree(full_name_);
+  while (!ranges_.empty()) {
+    AddressRange *r = ranges_.front();
+    ranges_.pop_front();
+    InternalFree(r);
+  }
 }
 
 void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
-  CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
-  ranges_[n_ranges_].beg = beg;
-  ranges_[n_ranges_].end = end;
-  exec_[n_ranges_] = executable;
-  n_ranges_++;
+  void *mem = InternalAlloc(sizeof(AddressRange));
+  AddressRange *r = new(mem) AddressRange(beg, end, executable);
+  ranges_.push_back(r);
 }
 
 bool LoadedModule::containsAddress(uptr address) const {
-  for (uptr i = 0; i < n_ranges_; i++) {
-    if (ranges_[i].beg <= address && address < ranges_[i].end)
+  for (Iterator iter = ranges(); iter.hasNext();) {
+    const AddressRange *r = iter.next();
+    if (r->beg <= address && address < r->end)
       return true;
   }
   return false;
@@ -212,12 +278,9 @@
   if (!common_flags()->mmap_limit_mb) return;
   uptr total_mmaped =
       atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
-  if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
-    // Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
-    uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
-    common_flags()->mmap_limit_mb = 0;  // Allow mmap in CHECK.
-    RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
-  }
+  // Since for now mmap_limit_mb is not a user-facing flag, just kill
+  // the program. Use RAW_CHECK to avoid extra mmaps in reporting.
+  RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
 }
 
 void DecreaseTotalMmap(uptr size) {
@@ -225,39 +288,63 @@
   atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
 }
 
+bool TemplateMatch(const char *templ, const char *str) {
+  if (str == 0 || str[0] == 0)
+    return false;
+  bool start = false;
+  if (templ && templ[0] == '^') {
+    start = true;
+    templ++;
+  }
+  bool asterisk = false;
+  while (templ && templ[0]) {
+    if (templ[0] == '*') {
+      templ++;
+      start = false;
+      asterisk = true;
+      continue;
+    }
+    if (templ[0] == '$')
+      return str[0] == 0 || asterisk;
+    if (str[0] == 0)
+      return false;
+    char *tpos = (char*)internal_strchr(templ, '*');
+    char *tpos1 = (char*)internal_strchr(templ, '$');
+    if (tpos == 0 || (tpos1 && tpos1 < tpos))
+      tpos = tpos1;
+    if (tpos != 0)
+      tpos[0] = 0;
+    const char *str0 = str;
+    const char *spos = internal_strstr(str, templ);
+    str = spos + internal_strlen(templ);
+    templ = tpos;
+    if (tpos)
+      tpos[0] = tpos == tpos1 ? '$' : '*';
+    if (spos == 0)
+      return false;
+    if (start && spos != str0)
+      return false;
+    start = false;
+    asterisk = false;
+  }
+  return true;
+}
+
 }  // namespace __sanitizer
 
 using namespace __sanitizer;  // NOLINT
 
 extern "C" {
 void __sanitizer_set_report_path(const char *path) {
-  if (!path)
-    return;
-  uptr len = internal_strlen(path);
-  if (len > sizeof(report_path_prefix) - 100) {
-    Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
-           path[0], path[1], path[2], path[3],
-           path[4], path[5], path[6], path[7]);
-    Die();
-  }
-  if (report_fd != kStdoutFd &&
-      report_fd != kStderrFd &&
-      report_fd != kInvalidFd)
-    internal_close(report_fd);
-  report_fd = kInvalidFd;
-  log_to_file = false;
-  if (internal_strcmp(path, "stdout") == 0) {
-    report_fd = kStdoutFd;
-  } else if (internal_strcmp(path, "stderr") == 0) {
-    report_fd = kStderrFd;
-  } else {
-    internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
-    report_path_prefix[len] = '\0';
-    log_to_file = true;
-  }
+  report_file.SetReportPath(path);
 }
 
 void __sanitizer_report_error_summary(const char *error_summary) {
   Printf("%s\n", error_summary);
 }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+  SetUserDieCallback(callback);
+}
 }  // extern "C"
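
TemplateMatch() implements a tiny pattern language: '^' anchors the match at the start, '$' at the end, '*' skips any run of characters, and a bare chunk matches anywhere. A usage sketch, assuming it is linked against the file above; the patterns live in writable buffers because the matcher temporarily patches '*'/'$' in place:

#include <cassert>

namespace __sanitizer {
bool TemplateMatch(const char *templ, const char *str);
}
using __sanitizer::TemplateMatch;

int main() {
  char anywhere[] = "foo";   // bare text matches anywhere in the string
  char anchored[] = "^foo";  // '^' anchors at the start
  char suffix[]   = "foo$";  // '$' anchors at the end
  char wildcard[] = "a*c";   // '*' skips any run of characters
  assert(TemplateMatch(anywhere, "a_foo_b"));
  assert(TemplateMatch(anchored, "foobar"));
  assert(!TemplateMatch(anchored, "barfoo"));
  assert(TemplateMatch(suffix, "barfoo"));
  assert(!TemplateMatch(suffix, "foobar"));
  assert(TemplateMatch(wildcard, "abbbc"));
  return 0;
}
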
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index c1e2101..ff13ef1 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -7,8 +7,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// This file is shared between run-time libraries of sanitizers.
+//
 // It declares common functions and classes that are used in both runtimes.
 // Implementation of some functions are provided in sanitizer_common, while
 // others must be defined by run-time library itself.
@@ -16,10 +16,12 @@
 #ifndef SANITIZER_COMMON_H
 #define SANITIZER_COMMON_H
 
+#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_list.h"
 #include "sanitizer_mutex.h"
-#include "sanitizer_flags.h"
 
 namespace __sanitizer {
 struct StackTrace;
@@ -34,12 +36,20 @@
   const uptr kCacheLineSize = 64;
 #endif
 
-const uptr kMaxPathLength = 512;
+const uptr kMaxPathLength = 4096;
 
 const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb
 
 extern const char *SanitizerToolName;  // Can be changed by the tool.
 
+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+  return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
 uptr GetPageSize();
 uptr GetPageSizeCached();
 uptr GetMmapGranularity();
@@ -66,6 +76,9 @@
 void FlushUnneededShadowMemory(uptr addr, uptr size);
 void IncreaseTotalMmap(uptr size);
 void DecreaseTotalMmap(uptr size);
+uptr GetRSS();
+void NoHugePagesInRegion(uptr addr, uptr length);
+void DontDumpShadowMemory(uptr addr, uptr length);
 
 // InternalScopedBuffer can be used instead of large stack arrays to
 // keep frame size low.
@@ -128,29 +141,48 @@
 
 // IO
 void RawWrite(const char *buffer);
-bool PrintsToTty();
-// Caching version of PrintsToTty(). Not thread-safe.
-bool PrintsToTtyCached();
 bool ColorizeReports();
 void Printf(const char *format, ...);
 void Report(const char *format, ...);
 void SetPrintfAndReportCallback(void (*callback)(const char *));
 #define VReport(level, ...)                                              \
   do {                                                                   \
-    if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
+    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
   } while (0)
 #define VPrintf(level, ...)                                              \
   do {                                                                   \
-    if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
+    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
   } while (0)
 
 // Can be used to prevent mixing error reports from different sanitizers.
 extern StaticSpinMutex CommonSanitizerReportMutex;
-void MaybeOpenReportFile();
-extern fd_t report_fd;
-extern bool log_to_file;
-extern char report_path_prefix[4096];
-extern uptr report_fd_pid;
+
+struct ReportFile {
+  void Write(const char *buffer, uptr length);
+  bool PrintsToTty();
+  void SetReportPath(const char *path);
+
+  // Don't use fields directly. They are only declared public to allow
+  // aggregate initialization.
+
+  // Protects fields below.
+  StaticSpinMutex *mu;
+  // Opened file descriptor. Defaults to stderr. It may be equal to
+  // kInvalidFd, in which case a new file will be opened when necessary.
+  fd_t fd;
+  // Path prefix of report file, set via __sanitizer_set_report_path.
+  char path_prefix[kMaxPathLength];
+  // Full path to report, obtained as <path_prefix>.PID
+  char full_path[kMaxPathLength];
+  // PID of the process that opened fd. If a fork() occurs,
+  // the PID of the child will differ from fd_pid.
+  uptr fd_pid;
+
+ private:
+  void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
 extern uptr stoptheworld_tracer_pid;
 extern uptr stoptheworld_tracer_ppid;
 
@@ -159,8 +191,8 @@
 // The resulting buffer is mmapped and stored in '*buff'.
 // The size of the mmapped region is stored in '*buff_size'.
 // Returns the number of bytes read, or 0 if the file cannot be opened.
-uptr ReadFileToBuffer(const char *file_name, char **buff,
-                      uptr *buff_size, uptr max_len);
+uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+                      uptr max_len, int *errno_p = nullptr);
 // Maps given file to virtual memory, and returns pointer to it
 // (or NULL if the mapping fails). Stores the size of the mmapped region
 // in '*buff_size'.
@@ -194,10 +226,13 @@
 void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
 void SetSandboxingCallback(void (*f)());
 
-void CovUpdateMapping(uptr caller_pc = 0);
+void CoverageUpdateMapping();
 void CovBeforeFork();
 void CovAfterFork(int child_pid);
 
+void InitializeCoverage(bool enabled, const char *coverage_dir);
+void ReInitializeCoverage(bool enabled, const char *coverage_dir);
+
 void InitTlsSize();
 uptr GetTlsSize();
 
@@ -207,6 +242,7 @@
 u64 NanoTime();
 int Atexit(void (*function)(void));
 void SortArray(uptr *array, uptr size);
+bool TemplateMatch(const char *templ, const char *str);
 
 // Exit
 void NORETURN Abort();
@@ -225,11 +261,18 @@
 // to do tool-specific job.
 typedef void (*DieCallbackType)(void);
 void SetDieCallback(DieCallbackType);
+void SetUserDieCallback(DieCallbackType);
 DieCallbackType GetDieCallback();
 typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
 void SetCheckFailedCallback(CheckFailedCallbackType callback);
 
+// The callback will be called if soft_rss_limit_mb is set and the limit is
+// exceeded (exceeded==true), or when RSS drops back below the limit
+// (exceeded==false).
+// The callback should be registered once at the tool init time.
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+
 // Functions related to signal handling.
 typedef void (*SignalHandlerType)(int, void *, void *);
 bool IsDeadlySignal(int signum);
@@ -356,14 +399,14 @@
 // small vectors.
 // WARNING: The current implementation supports only POD types.
 template<typename T>
-class InternalMmapVector {
+class InternalMmapVectorNoCtor {
  public:
-  explicit InternalMmapVector(uptr initial_capacity) {
+  void Initialize(uptr initial_capacity) {
     capacity_ = Max(initial_capacity, (uptr)1);
     size_ = 0;
-    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
+    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
   }
-  ~InternalMmapVector() {
+  void Destroy() {
     UnmapOrDie(data_, capacity_ * sizeof(T));
   }
   T &operator[](uptr i) {
@@ -414,15 +457,24 @@
     UnmapOrDie(old_data, capacity_ * sizeof(T));
     capacity_ = new_capacity;
   }
-  // Disallow evil constructors.
-  InternalMmapVector(const InternalMmapVector&);
-  void operator=(const InternalMmapVector&);
 
   T *data_;
   uptr capacity_;
   uptr size_;
 };
 
+template<typename T>
+class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
+ public:
+  explicit InternalMmapVector(uptr initial_capacity) {
+    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
+  }
+  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
+  // Disallow evil constructors.
+  InternalMmapVector(const InternalMmapVector&);
+  void operator=(const InternalMmapVector&);
+};
+
 // HeapSort for arrays and InternalMmapVector.
 template<class Container, class Compare>
 void InternalSort(Container *v, uptr size, Compare comp) {
@@ -481,28 +533,30 @@
 class LoadedModule {
  public:
   LoadedModule(const char *module_name, uptr base_address);
+  void clear();
   void addAddressRange(uptr beg, uptr end, bool executable);
   bool containsAddress(uptr address) const;
 
   const char *full_name() const { return full_name_; }
   uptr base_address() const { return base_address_; }
 
-  uptr n_ranges() const { return n_ranges_; }
-  uptr address_range_start(int i) const { return ranges_[i].beg; }
-  uptr address_range_end(int i) const { return ranges_[i].end; }
-  bool address_range_executable(int i) const { return exec_[i]; }
-
- private:
   struct AddressRange {
+    AddressRange *next;
     uptr beg;
     uptr end;
+    bool executable;
+
+    AddressRange(uptr beg, uptr end, bool executable)
+        : next(nullptr), beg(beg), end(end), executable(executable) {}
   };
-  char *full_name_;
+
+  typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
+  Iterator ranges() const { return Iterator(&ranges_); }
+
+ private:
+  char *full_name_;  // Owned.
   uptr base_address_;
-  static const uptr kMaxNumberOfAddressRanges = 6;
-  AddressRange ranges_[kMaxNumberOfAddressRanges];
-  bool exec_[kMaxNumberOfAddressRanges];
-  uptr n_ranges_;
+  IntrusiveList<AddressRange> ranges_;
 };
 
 // OS-dependent function that fills array with descriptions of at most
@@ -535,6 +589,24 @@
 INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
 INLINE void SanitizerInitializeUnwinder() {}
 #endif
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+void MaybeStartBackgroudThread();
+
+// Make the compiler think that something is going on there.
+// Use this inside a loop that looks like memset/memcpy/etc to prevent the
+// compiler from recognising it and turning it into an actual call to
+// memset/memcpy/etc.
+static inline void SanitizerBreakOptimization(void *arg) {
+#if _MSC_VER
+  // FIXME: make sure this is actually enough.
+  __asm;
+#else
+  __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
 }  // namespace __sanitizer
 
 inline void *operator new(__sanitizer::operator_new_size_type size,
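
ReportFile is kept as a plain aggregate (public fields, no constructors) so the namespace-scope report_file instance is constant-initialized and usable before any dynamic initializers run. A generic sketch of that pattern, unrelated to the actual ReportFile fields:

#include <cstring>
#include <unistd.h>

struct LogSink {
  int fd;              // public on purpose: enables aggregate init below
  const char *prefix;
  void Write(const char *msg) {
    (void)write(fd, prefix, strlen(prefix));
    (void)write(fd, msg, strlen(msg));
    (void)write(fd, "\n", 1);
  }
};

// Constant-initialized at link time; safe to use from other static initializers.
static LogSink g_log = {2 /* stderr */, "sanitizer: "};

int main() {
  g_log.Write("hello");
  return 0;
}
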
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 274e87c..f724115 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -17,10 +17,12 @@
 //   COMMON_INTERCEPTOR_READ_RANGE
 //   COMMON_INTERCEPTOR_WRITE_RANGE
 //   COMMON_INTERCEPTOR_INITIALIZE_RANGE
+//   COMMON_INTERCEPTOR_DIR_ACQUIRE
 //   COMMON_INTERCEPTOR_FD_ACQUIRE
 //   COMMON_INTERCEPTOR_FD_RELEASE
 //   COMMON_INTERCEPTOR_FD_ACCESS
 //   COMMON_INTERCEPTOR_SET_THREAD_NAME
+//   COMMON_INTERCEPTOR_ON_DLOPEN
 //   COMMON_INTERCEPTOR_ON_EXIT
 //   COMMON_INTERCEPTOR_MUTEX_LOCK
 //   COMMON_INTERCEPTOR_MUTEX_UNLOCK
@@ -43,6 +45,8 @@
 
 #if SANITIZER_FREEBSD
 #define pthread_setname_np pthread_set_name_np
+#define inet_aton __inet_aton
+#define inet_pton __inet_pton
 #endif
 
 #ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -82,7 +86,7 @@
 #endif
 
 #ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {}
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
 #endif
 
 #ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
@@ -98,6 +102,10 @@
 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
 #endif
 
+#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
+#endif
+
 struct FileMetadata {
   // For open_memstream().
   char **addr;
@@ -915,6 +923,16 @@
             va_list ap)
 VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
 
+#if SANITIZER_INTERCEPT_PRINTF_L
+INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
+            const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
+
+INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
+            const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
+#endif  // SANITIZER_INTERCEPT_PRINTF_L
+
 INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
 VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
 
@@ -991,6 +1009,14 @@
 #define INIT_PRINTF
 #endif
 
+#if SANITIZER_INTERCEPT_PRINTF_L
+#define INIT_PRINTF_L                     \
+  COMMON_INTERCEPT_FUNCTION(snprintf_l);  \
+  COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
+#else
+#define INIT_PRINTF_L
+#endif
+
 #if SANITIZER_INTERCEPT_ISOC99_PRINTF
 #define INIT_ISOC99_PRINTF                       \
   COMMON_INTERCEPT_FUNCTION(__isoc99_printf);    \
@@ -1007,8 +1033,18 @@
 
 #if SANITIZER_INTERCEPT_IOCTL
 #include "sanitizer_common_interceptors_ioctl.inc"
-INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
+INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
+  // We need a frame pointer, because we call into ioctl_common_[pre|post] which
+  // can trigger a report and we need to be able to unwind through this
+  // function.  On Mac in debug mode we might not have a frame pointer, because
+  // ioctl_common_[pre|post] doesn't get inlined here.
+  ENABLE_FRAME_POINTER;
+
   void *ctx;
+  va_list ap;
+  va_start(ap, request);
+  void *arg = va_arg(ap, void *);
+  va_end(ap);
   COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
 
   CHECK(ioctl_initialized);
@@ -1017,6 +1053,10 @@
   // This effectively disables ioctl handling in TSan.
   if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
 
+  // Although request is unsigned long, the rest of the interceptor uses it
+  // as just "unsigned" to save space, because we know that all values fit in
+  // "unsigned" - they are compile-time constants.
+
   const ioctl_desc *desc = ioctl_lookup(request);
   ioctl_desc decoded_desc;
   if (!desc) {
@@ -2139,6 +2179,16 @@
 #endif
 
 #if SANITIZER_INTERCEPT_READDIR
+INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  __sanitizer_dirent *res = REAL(opendir)(path);
+  if (res != 0)
+    COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
+  return res;
+}
+
 INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
@@ -2167,6 +2217,7 @@
 }
 
 #define INIT_READDIR                  \
+  COMMON_INTERCEPT_FUNCTION(opendir); \
   COMMON_INTERCEPT_FUNCTION(readdir); \
   COMMON_INTERCEPT_FUNCTION(readdir_r);
 #else
@@ -2560,6 +2611,19 @@
 #define INIT_SCHED_GETAFFINITY
 #endif
 
+#if SANITIZER_INTERCEPT_SCHED_GETPARAM
+INTERCEPTOR(int, sched_getparam, int pid, void *param) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
+  int res = REAL(sched_getparam)(pid, param);
+  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
+  return res;
+}
+#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
+#else
+#define INIT_SCHED_GETPARAM
+#endif
+
 #if SANITIZER_INTERCEPT_STRERROR
 INTERCEPTOR(char *, strerror, int errnum) {
   void *ctx;
@@ -3868,6 +3932,12 @@
 
 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
 #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+// If you see any crashes around this function, there are 2 known issues with
+// it: 1. __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// 2. It can be called recursively if sanitizer code uses __tls_get_addr
+// to access thread local variables (it should not happen normally,
+// because sanitizers use initial-exec tls model).
 INTERCEPTOR(void *, __tls_get_addr, void *arg) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
@@ -4629,6 +4699,7 @@
 INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+  COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
   void *res = REAL(dlopen)(filename, flag);
   COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
   return res;
@@ -4762,6 +4833,7 @@
   INIT_SCANF;
   INIT_ISOC99_SCANF;
   INIT_PRINTF;
+  INIT_PRINTF_L;
   INIT_ISOC99_PRINTF;
   INIT_FREXP;
   INIT_FREXPF_FREXPL;
@@ -4812,6 +4884,7 @@
   INIT_CANONICALIZE_FILE_NAME;
   INIT_CONFSTR;
   INIT_SCHED_GETAFFINITY;
+  INIT_SCHED_GETPARAM;
   INIT_STRERROR;
   INIT_STRERROR_R;
   INIT_XPG_STRERROR_R;
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
old mode 100755
new mode 100644
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 4374f56..17ef689 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -13,35 +13,27 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_flags.h"
+#include "sanitizer_stackdepot.h"
 #include "sanitizer_stacktrace.h"
 #include "sanitizer_symbolizer.h"
 
 namespace __sanitizer {
 
-bool PrintsToTty() {
-  MaybeOpenReportFile();
-  return internal_isatty(report_fd) != 0;
-}
-
-bool PrintsToTtyCached() {
-  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
-  // printing on Windows.
-  if (SANITIZER_WINDOWS)
-    return 0;
-
-  static int cached = 0;
-  static bool prints_to_tty;
-  if (!cached) {  // Not thread-safe.
-    prints_to_tty = PrintsToTty();
-    cached = 1;
-  }
-  return prints_to_tty;
+bool ReportFile::PrintsToTty() {
+  SpinMutexLock l(mu);
+  ReopenIfNecessary();
+  return internal_isatty(fd) != 0;
 }
 
 bool ColorizeReports() {
+  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
+  // printing on Windows.
+  if (SANITIZER_WINDOWS)
+    return false;
+
   const char *flag = common_flags()->color;
   return internal_strcmp(flag, "always") == 0 ||
-         (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
+         (internal_strcmp(flag, "auto") == 0 && report_file.PrintsToTty());
 }
 
 static void (*sandboxing_callback)();
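
ColorizeReports above folds the old PrintsToTtyCached logic into one decision: no color on Windows, color=always forces it on, and color=auto enables it only when the report file is a terminal. A standalone sketch of that decision, using plain POSIX isatty/fileno in place of the runtime's report_file plumbing:

#include <cstdio>
#include <cstring>
#include <unistd.h>

// Same three-way decision as ColorizeReports, minus the Windows special case.
static bool colorize_reports(const char *color_flag, std::FILE *report_stream) {
  if (std::strcmp(color_flag, "always") == 0) return true;
  if (std::strcmp(color_flag, "auto") != 0) return false;   // "never" or anything else
  return isatty(fileno(report_stream)) != 0;                // "auto": only on a terminal
}

int main() {
  std::printf("color=auto on stderr -> %d\n", colorize_reports("auto", stderr));
  return 0;
}
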
@@ -52,16 +44,85 @@
 void ReportErrorSummary(const char *error_type, StackTrace *stack) {
   if (!common_flags()->print_summary)
     return;
-  AddressInfo ai;
 #if !SANITIZER_GO
   if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) {
     // Currently, we include the first stack frame into the report summary.
     // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
     uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
-    Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
+    SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+    const AddressInfo &ai = frame->info;
+    ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+    frame->ClearAll();
   }
-#endif
+#else
+  AddressInfo ai;
   ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+#endif
+}
+
+static void (*SoftRssLimitExceededCallback)(bool exceeded);
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
+  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
+  SoftRssLimitExceededCallback = Callback;
+}
+
+void BackgroundThread(void *arg) {
+  uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
+  uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+  uptr prev_reported_rss = 0;
+  uptr prev_reported_stack_depot_size = 0;
+  bool reached_soft_rss_limit = false;
+  while (true) {
+    SleepForMillis(100);
+    uptr current_rss_mb = GetRSS() >> 20;
+    if (Verbosity()) {
+      // If RSS has grown 10% since last time, print some information.
+      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
+        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
+        prev_reported_rss = current_rss_mb;
+      }
+      // If stack depot has grown 10% since last time, print it too.
+      StackDepotStats *stack_depot_stats = StackDepotGetStats();
+      if (prev_reported_stack_depot_size * 11 / 10 <
+          stack_depot_stats->allocated) {
+        Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
+               SanitizerToolName,
+               stack_depot_stats->n_uniq_ids,
+               stack_depot_stats->allocated >> 20);
+        prev_reported_stack_depot_size = stack_depot_stats->allocated;
+      }
+    }
+    // Check RSS against the limit.
+    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
+      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
+             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
+      DumpProcessMap();
+      Die();
+    }
+    if (soft_rss_limit_mb) {
+      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
+        reached_soft_rss_limit = true;
+        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
+               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+        if (SoftRssLimitExceededCallback)
+          SoftRssLimitExceededCallback(true);
+      } else if (soft_rss_limit_mb >= current_rss_mb &&
+                 reached_soft_rss_limit) {
+        reached_soft_rss_limit = false;
+        if (SoftRssLimitExceededCallback)
+          SoftRssLimitExceededCallback(false);
+      }
+    }
+  }
+}
+
+void MaybeStartBackgroudThread() {
+  if (!SANITIZER_LINUX) return;  // Need to implement/test on other platforms.
+  // Start the background thread if one of the rss limits is given.
+  if (!common_flags()->hard_rss_limit_mb &&
+      !common_flags()->soft_rss_limit_mb) return;
+  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
+  internal_start_thread(BackgroundThread, nullptr);
 }
 
 }  // namespace __sanitizer
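
The background thread added above polls RSS every 100 ms and applies the soft limit with hysteresis: the callback fires once with true when the limit is first crossed and once with false when RSS falls back under it, not on every iteration. The compact standalone model below reproduces that edge-triggered logic; check_soft_limit, on_soft_limit and the RSS samples are made up for the example.

#include <cstdio>

// Edge-triggered soft-limit notification, as in BackgroundThread above:
// notify once on crossing up, once on crossing back down.
static void check_soft_limit(unsigned long rss_mb, unsigned long limit_mb,
                             bool *reached, void (*callback)(bool)) {
  if (limit_mb == 0) return;  // limit not set
  if (rss_mb > limit_mb && !*reached) {
    *reached = true;
    callback(true);
  } else if (rss_mb <= limit_mb && *reached) {
    *reached = false;
    callback(false);
  }
}

static void on_soft_limit(bool exceeded) {
  std::printf("soft rss limit %s\n", exceeded ? "exceeded" : "back to normal");
}

int main() {
  bool reached = false;
  // Made-up RSS samples (in Mb) checked against a 100 Mb soft limit.
  unsigned long rss_samples[] = {50, 90, 120, 130, 95, 80, 110};
  for (unsigned long rss : rss_samples)
    check_soft_limit(rss, 100, &reached, on_soft_limit);
  return 0;
}
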
diff --git a/lib/sanitizer_common/sanitizer_common_syscalls.inc b/lib/sanitizer_common/sanitizer_common_syscalls.inc
index a52338b..f2c054e 100644
--- a/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -1443,6 +1443,7 @@
 
 POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
 
+#if SANITIZER_USES_UID16_SYSCALLS
 PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
   if (filename)
     PRE_READ(filename,
@@ -1552,6 +1553,7 @@
 PRE_SYSCALL(getegid16)() {}
 
 POST_SYSCALL(getegid16)(long res) {}
+#endif // SANITIZER_USES_UID16_SYSCALLS
 
 PRE_SYSCALL(utime)(void *filename, void *times) {}
 
@@ -2297,7 +2299,8 @@
 POST_SYSCALL(ni_syscall)(long res) {}
 
 PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
+#if !SANITIZER_ANDROID && \
+    (defined(__i386) || defined(__x86_64) || defined(__mips64))
   if (data) {
     if (request == ptrace_setregs) {
       PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2316,7 +2319,8 @@
 }
 
 POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
+#if !SANITIZER_ANDROID && \
+    (defined(__i386) || defined(__x86_64) || defined(__mips64))
   if (res >= 0 && data) {
     // Note that this is different from the interceptor in
     // sanitizer_common_interceptors.inc.
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index bd98adb..49887b1 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -12,14 +12,16 @@
 //
 // Compiler instrumentation:
 // For every interesting basic block the compiler injects the following code:
-// if (*Guard) {
-//    __sanitizer_cov();
-//    *Guard = 1;
+// if (Guard < 0) {
+//    __sanitizer_cov(&Guard);
 // }
+// At module start-up time, __sanitizer_cov_module_init sets the guards
+// to consecutive negative numbers (-1, -2, -3, ...).
 // It's fine to call __sanitizer_cov more than once for a given block.
 //
 // Run-time:
 //  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
+//    and atomically set Guard to -Guard.
 //  - __sanitizer_cov_dump: dump the coverage data to disk.
 //  For every module of the current process that has coverage data
 //  this will create a file module_name.PID.sancov. The file format is simple:
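
The updated header comment describes the new guard scheme: the compiler injects a per-block "if (Guard < 0) __sanitizer_cov(&Guard);", module init numbers the guards -1, -2, -3, ..., and the runtime flips a guard positive the first time its block executes. The standalone simulation below walks through that handshake; guards, cov_hit, run_block and the PC values are invented for the example and are not sanitizer APIs.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Simulated per-block guards and the pc_array slots their values encode.
static std::atomic<int> guards[3];
static std::uintptr_t pc_array[3];  // slot k records the block whose guard started as -(k+1)

// What __sanitizer_cov boils down to: flip the guard positive once and
// record the PC in the slot encoded by the guard's initial value.
static void cov_hit(std::atomic<int> *guard, std::uintptr_t pc) {
  int v = guard->load(std::memory_order_relaxed);
  if (v >= 0) return;                           // this block was already recorded
  guard->store(-v, std::memory_order_relaxed);  // -k becomes k
  pc_array[-v - 1] = pc;
}

// The compiler-injected check for one (simulated) basic block.
static void run_block(int i) {
  if (guards[i].load(std::memory_order_relaxed) < 0)
    cov_hit(&guards[i], 0x1000 + i);            // a made-up PC for the example
}

int main() {
  for (int i = 0; i < 3; i++)                   // "module init": -1, -2, -3
    guards[i].store(-(i + 1), std::memory_order_relaxed);
  run_block(0); run_block(2); run_block(0);     // block 1 never executes
  for (int i = 0; i < 3; i++)
    std::printf("slot %d: pc=0x%lx guard=%d\n", i, (unsigned long)pc_array[i],
                guards[i].load(std::memory_order_relaxed));
  return 0;
}
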
@@ -56,23 +58,32 @@
 static bool cov_sandboxed = false;
 static int cov_fd = kInvalidFd;
 static unsigned int cov_max_block_size = 0;
+static bool coverage_enabled = false;
+static const char *coverage_dir;
 
 namespace __sanitizer {
 
 class CoverageData {
  public:
   void Init();
+  void Enable();
+  void Disable();
+  void ReInit();
   void BeforeFork();
   void AfterFork(int child_pid);
   void Extend(uptr npcs);
-  void Add(uptr pc);
+  void Add(uptr pc, u32 *guard);
   void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                  uptr cache_size);
   void DumpCallerCalleePairs();
   void DumpTrace();
 
   ALWAYS_INLINE
-  void TraceBasicaBlock(uptr *cache);
+  void TraceBasicBlock(s32 *id);
+
+  void InitializeGuardArray(s32 *guards);
+  void InitializeGuards(s32 *guards, uptr n, const char *module_name);
+  void ReinitializeGuards();
 
   uptr *data();
   uptr size();
@@ -80,7 +91,7 @@
  private:
   // Maximal size pc array may ever grow.
   // We MmapNoReserve this space to ensure that the array is contiguous.
-  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 24, 1 << 27);
   // The amount file mapping for the pc array is grown by.
   static const uptr kPcArrayMmapSize = 64 * 1024;
 
@@ -96,45 +107,44 @@
   // Descriptor of the file mapped pc array.
   int pc_fd;
 
+  // Vector of coverage guard arrays, protected by mu.
+  InternalMmapVectorNoCtor<s32*> guard_array_vec;
+
+  // Vector of module (compilation unit) names.
+  InternalMmapVectorNoCtor<const char*> comp_unit_name_vec;
+
   // Caller-Callee (cc) array, size and current index.
   static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
   uptr **cc_array;
   atomic_uintptr_t cc_array_index;
   atomic_uintptr_t cc_array_size;
 
-  // Tracing (tr) pc and event arrays, their size and current index.
+  // Tracing event array, size and current pointer.
   // We record all events (basic block entries) in a global buffer of u32
-  // values. Each such value is an index in the table of TracedPc objects.
+  // values. Each such value is the index in pc_array.
   // So far the tracing is highly experimental:
   //   - not thread-safe;
   //   - does not support long traces;
   //   - not tuned for performance.
-  struct TracedPc {
-    uptr pc;
-    const char *module_name;
-    uptr module_offset;
-  };
   static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
   u32 *tr_event_array;
   uptr tr_event_array_size;
-  uptr tr_event_array_index;
+  u32 *tr_event_pointer;
   static const uptr kTrPcArrayMaxSize    = FIRST_32_SECOND_64(1 << 22, 1 << 27);
-  TracedPc *tr_pc_array;
-  uptr tr_pc_array_size;
-  uptr tr_pc_array_index;
 
   StaticSpinMutex mu;
 
   void DirectOpen();
-  void ReInit();
 };
 
 static CoverageData coverage_data;
 
+void CovUpdateMapping(const char *path, uptr caller_pc = 0);
+
 void CoverageData::DirectOpen() {
-  InternalScopedString path(1024);
+  InternalScopedString path(kMaxPathLength);
   internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
-                    common_flags()->coverage_dir, internal_getpid());
+                    coverage_dir, internal_getpid());
   pc_fd = OpenFile(path.data(), true);
   if (internal_iserror(pc_fd)) {
     Report(" Coverage: failed to open %s for writing\n", path.data());
@@ -142,19 +152,23 @@
   }
 
   pc_array_mapped_size = 0;
-  CovUpdateMapping();
+  CovUpdateMapping(coverage_dir);
 }
 
 void CoverageData::Init() {
+  pc_fd = kInvalidFd;
+}
+
+void CoverageData::Enable() {
+  if (pc_array)
+    return;
   pc_array = reinterpret_cast<uptr *>(
       MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
-  pc_fd = kInvalidFd;
+  atomic_store(&pc_array_index, 0, memory_order_relaxed);
   if (common_flags()->coverage_direct) {
     atomic_store(&pc_array_size, 0, memory_order_relaxed);
-    atomic_store(&pc_array_index, 0, memory_order_relaxed);
   } else {
     atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
-    atomic_store(&pc_array_index, 0, memory_order_relaxed);
   }
 
   cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
@@ -162,30 +176,72 @@
   atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
   atomic_store(&cc_array_index, 0, memory_order_relaxed);
 
-  tr_event_array = reinterpret_cast<u32 *>(
-      MmapNoReserveOrDie(sizeof(tr_event_array[0]) * kTrEventArrayMaxSize,
-                         "CovInit::tr_event_array"));
+  // Allocate tr_event_array with a guard page at the end.
+  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
+      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
+      "CovInit::tr_event_array"));
+  Mprotect(reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
+           GetMmapGranularity());
   tr_event_array_size = kTrEventArrayMaxSize;
-  tr_event_array_index = 0;
+  tr_event_pointer = tr_event_array;
+}
 
-  tr_pc_array = reinterpret_cast<TracedPc *>(MmapNoReserveOrDie(
-      sizeof(tr_pc_array[0]) * kTrEventArrayMaxSize, "CovInit::tr_pc_array"));
-  tr_pc_array_size = kTrEventArrayMaxSize;
-  tr_pc_array_index = 0;
+void CoverageData::InitializeGuardArray(s32 *guards) {
+  Enable();  // Make sure coverage is enabled at this point.
+  s32 n = guards[0];
+  for (s32 j = 1; j <= n; j++) {
+    uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+    guards[j] = -static_cast<s32>(idx + 1);
+  }
+}
+
+void CoverageData::Disable() {
+  if (pc_array) {
+    internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
+    pc_array = nullptr;
+  }
+  if (cc_array) {
+    internal_munmap(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
+    cc_array = nullptr;
+  }
+  if (tr_event_array) {
+    internal_munmap(tr_event_array,
+                    sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
+                        GetMmapGranularity());
+    tr_event_array = nullptr;
+    tr_event_pointer = nullptr;
+  }
+  if (pc_fd != kInvalidFd) {
+    internal_close(pc_fd);
+    pc_fd = kInvalidFd;
+  }
+}
+
+void CoverageData::ReinitializeGuards() {
+  // Assuming single thread.
+  atomic_store(&pc_array_index, 0, memory_order_relaxed);
+  for (uptr i = 0; i < guard_array_vec.size(); i++)
+    InitializeGuardArray(guard_array_vec[i]);
 }
 
 void CoverageData::ReInit() {
-  internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
-  if (pc_fd != kInvalidFd) internal_close(pc_fd);
-  if (common_flags()->coverage_direct) {
-    // In memory-mapped mode we must extend the new file to the known array
-    // size.
-    uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
-    Init();
-    if (size) Extend(size);
-  } else {
-    Init();
+  Disable();
+  if (coverage_enabled) {
+    if (common_flags()->coverage_direct) {
+      // In memory-mapped mode we must extend the new file to the known array
+      // size.
+      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+      Enable();
+      if (size) Extend(size);
+      if (coverage_enabled) CovUpdateMapping(coverage_dir);
+    } else {
+      Enable();
+    }
   }
+  // Re-initialize the guards.
+  // We are single-threaded now, no need to grab any lock.
+  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
+  ReinitializeGuards();
 }
 
 void CoverageData::BeforeFork() {
@@ -203,15 +259,16 @@
   if (!common_flags()->coverage_direct) return;
   SpinMutexLock l(&mu);
 
-  if (pc_fd == kInvalidFd) DirectOpen();
-  CHECK_NE(pc_fd, kInvalidFd);
-
   uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
   size += npcs * sizeof(uptr);
 
-  if (size > pc_array_mapped_size) {
+  if (coverage_enabled && size > pc_array_mapped_size) {
+    if (pc_fd == kInvalidFd) DirectOpen();
+    CHECK_NE(pc_fd, kInvalidFd);
+
     uptr new_mapped_size = pc_array_mapped_size;
     while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
+    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
 
     // Extend the file and map the new space at the end of pc_array.
     uptr res = internal_ftruncate(pc_fd, new_mapped_size);
@@ -220,21 +277,43 @@
       Printf("failed to extend raw coverage file: %d\n", err);
       Die();
     }
-    void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
+
+    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
+    void *p = MapWritableFileToMemory((void *)next_map_base,
                                       new_mapped_size - pc_array_mapped_size,
                                       pc_fd, pc_array_mapped_size);
-    CHECK_EQ(p, pc_array + pc_array_mapped_size);
+    CHECK_EQ((uptr)p, next_map_base);
     pc_array_mapped_size = new_mapped_size;
   }
 
   atomic_store(&pc_array_size, size, memory_order_release);
 }
 
-// Simply add the pc into the vector under lock. If the function is called more
-// than once for a given PC it will be inserted multiple times, which is fine.
-void CoverageData::Add(uptr pc) {
+void CoverageData::InitializeGuards(s32 *guards, uptr n,
+                                    const char *module_name) {
+  // The array 'guards' has n+1 elements, we use the element zero
+  // to store 'n'.
+  CHECK_LT(n, 1 << 30);
+  guards[0] = static_cast<s32>(n);
+  InitializeGuardArray(guards);
+  SpinMutexLock l(&mu);
+  comp_unit_name_vec.push_back(module_name);
+  guard_array_vec.push_back(guards);
+}
+
+// If guard is negative, atomically set it to -guard and store the PC in
+// pc_array.
+void CoverageData::Add(uptr pc, u32 *guard) {
+  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
+  if (guard_value >= 0) return;
+
+  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
   if (!pc_array) return;
-  uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+
+  uptr idx = -guard_value - 1;
+  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
+    return;  // May happen after fork when pc_array_index becomes 0.
   CHECK_LT(idx * sizeof(uptr),
            atomic_load(&pc_array_size, memory_order_acquire));
   pc_array[idx] = pc;
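
InitializeGuards above stores the block count in element zero of each module's guard array, and InitializeGuardArray hands out pc_array slots across modules from a single counter, so a guard's value encodes a process-wide slot index. A small sketch of that numbering, with a plain int standing in for the atomic pc_array_index:

#include <cstdio>

// Layout used by InitializeGuards/InitializeGuardArray above:
// guards[0] holds n, guards[1..n] hold the negated 1-based pc_array slots.
static void initialize_guard_array(int *guards, int *next_index) {
  int n = guards[0];
  for (int j = 1; j <= n; j++) {
    int idx = (*next_index)++;          // atomic_fetch_add in the real code
    guards[j] = -(idx + 1);
  }
}

int main() {
  int next_index = 0;                   // pc_array_index in the real code
  int module_a[4] = {3};                // a module with 3 instrumented blocks
  int module_b[3] = {2};                // a module with 2 instrumented blocks
  initialize_guard_array(module_a, &next_index);
  initialize_guard_array(module_b, &next_index);
  for (int j = 1; j <= module_a[0]; j++) std::printf("a[%d] = %d\n", j, module_a[j]);
  for (int j = 1; j <= module_b[0]; j++) std::printf("b[%d] = %d\n", j, module_b[j]);
  return 0;  // prints -1, -2, -3 and then -4, -5: slots are global across modules
}
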
@@ -334,20 +413,19 @@
 // If packed = true and name == 0: <pid>.<sancov>.<packed>.
 // If packed = true and name != 0: <name>.<sancov>.<packed> (name is
 // user-supplied).
-static int CovOpenFile(bool packed, const char* name) {
-  InternalScopedBuffer<char> path(1024);
+static int CovOpenFile(bool packed, const char *name,
+                       const char *extension = "sancov") {
+  InternalScopedString path(kMaxPathLength);
   if (!packed) {
     CHECK(name);
-    internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
-                      common_flags()->coverage_dir, name, internal_getpid());
+    path.append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
+                extension);
   } else {
     if (!name)
-      internal_snprintf((char *)path.data(), path.size(),
-                        "%s/%zd.sancov.packed", common_flags()->coverage_dir,
-                        internal_getpid());
+      path.append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
+                  extension);
     else
-      internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed",
-                        common_flags()->coverage_dir, name);
+      path.append("%s/%s.%s.packed", coverage_dir, name, extension);
   }
   uptr fd = OpenFile(path.data(), true);
   if (internal_iserror(fd)) {
@@ -359,35 +437,49 @@
 
 // Dump trace PCs and trace events into two separate files.
 void CoverageData::DumpTrace() {
-  uptr max_idx = tr_event_array_index;
+  uptr max_idx = tr_event_pointer - tr_event_array;
   if (!max_idx) return;
   auto sym = Symbolizer::GetOrInit();
   if (!sym)
     return;
   InternalScopedString out(32 << 20);
-  for (uptr i = 0; i < max_idx; i++) {
-    u32 pc_idx = tr_event_array[i];
-    TracedPc *t = &tr_pc_array[pc_idx];
-    if (!t->module_name) {
-      const char *module_name = "<unknown>";
-      uptr module_address = 0;
-      sym->GetModuleNameAndOffsetForPC(t->pc, &module_name, &module_address);
-      t->module_name = internal_strdup(module_name);
-      t->module_offset = module_address;
-      out.append("%s 0x%zx\n", t->module_name, t->module_offset);
-    }
+  for (uptr i = 0, n = size(); i < n; i++) {
+    const char *module_name = "<unknown>";
+    uptr module_address = 0;
+    sym->GetModuleNameAndOffsetForPC(pc_array[i], &module_name,
+                                     &module_address);
+    out.append("%s 0x%zx\n", module_name, module_address);
   }
   int fd = CovOpenFile(false, "trace-points");
   if (fd < 0) return;
   internal_write(fd, out.data(), out.length());
   internal_close(fd);
 
+  fd = CovOpenFile(false, "trace-compunits");
+  if (fd < 0) return;
+  out.clear();
+  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
+    out.append("%s\n", comp_unit_name_vec[i]);
+  internal_write(fd, out.data(), out.length());
+  internal_close(fd);
+
   fd = CovOpenFile(false, "trace-events");
   if (fd < 0) return;
-  internal_write(fd, tr_event_array, max_idx * sizeof(tr_event_array[0]));
+  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
+  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
+  // The trace file could be huge, and may not be written with a single syscall.
+  while (bytes_to_write) {
+    uptr actually_written = internal_write(fd, event_bytes, bytes_to_write);
+    if (actually_written <= bytes_to_write) {
+      bytes_to_write -= actually_written;
+      event_bytes += actually_written;
+    } else {
+      break;
+    }
+  }
   internal_close(fd);
-  VReport(1, " CovDump: Trace: %zd PCs written\n", tr_pc_array_index);
-  VReport(1, " CovDump: Trace: %zd Events written\n", tr_event_array_index);
+  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
+  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
 }
 
 // This function dumps the caller=>callee pairs into a file as a sequence of
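
The trace-events dump above loops over internal_write because a single syscall may not accept a very large buffer. The same write-until-done pattern in conventional standalone form, using POSIX write instead of internal_write:

#include <cerrno>
#include <cstddef>
#include <unistd.h>

// Write the whole buffer, tolerating short writes; returns false on error.
static bool write_all(int fd, const void *buf, std::size_t len) {
  const char *p = static_cast<const char *>(buf);
  while (len > 0) {
    ssize_t n = write(fd, p, len);
    if (n < 0) {
      if (errno == EINTR) continue;  // interrupted, retry
      return false;
    }
    p += n;
    len -= static_cast<std::size_t>(n);
  }
  return true;
}

int main() {
  const char msg[] = "trace events would go here\n";
  return write_all(STDOUT_FILENO, msg, sizeof(msg) - 1) ? 0 : 1;
}
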
@@ -432,28 +524,45 @@
 // Record the current PC into the event buffer.
 // Every event is a u32 value (index in tr_pc_array_index) so we compute
 // it once and then cache in the provided 'cache' storage.
-void CoverageData::TraceBasicaBlock(uptr *cache) {
-  CHECK(common_flags()->coverage);
-  uptr idx = *cache;
-  if (!idx) {
-    CHECK_LT(tr_pc_array_index, kTrPcArrayMaxSize);
-    idx = tr_pc_array_index++;
-    TracedPc *t = &tr_pc_array[idx];
-    t->pc = GET_CALLER_PC();
-    *cache = idx;
-    CHECK_LT(idx, 1U << 31);
+//
+// This function will eventually be inlined by the compiler.
+void CoverageData::TraceBasicBlock(s32 *id) {
+  // Will trap here if
+  //  1. coverage is not enabled at run-time.
+  //  2. The array tr_event_array is full.
+  *tr_event_pointer = static_cast<u32>(*id - 1);
+  tr_event_pointer++;
+}
+
+static void CovDumpAsBitSet() {
+  if (!common_flags()->coverage_bitset) return;
+  if (!coverage_data.size()) return;
+  int fd = CovOpenFile(/* packed */false, "combined", "bitset-sancov");
+  if (fd < 0) return;
+  uptr n = coverage_data.size();
+  uptr n_set_bits = 0;
+  InternalScopedBuffer<char> out(n);
+  for (uptr i = 0; i < n; i++) {
+    uptr pc = coverage_data.data()[i];
+    out[i] = pc ? '1' : '0';
+    if (pc)
+      n_set_bits++;
   }
-  CHECK_LT(tr_event_array_index, tr_event_array_size);
-  tr_event_array[tr_event_array_index] = static_cast<u32>(idx);
-  tr_event_array_index++;
+  internal_write(fd, out.data(), n);
+  internal_close(fd);
+  VReport(1, " CovDump: bitset of %zd bits written, %zd bits are set\n", n,
+          n_set_bits);
 }
 
 // Dump the coverage on disk.
 static void CovDump() {
-  if (!common_flags()->coverage || common_flags()->coverage_direct) return;
+  if (!coverage_enabled || common_flags()->coverage_direct) return;
 #if !SANITIZER_WINDOWS
   if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
     return;
+  CovDumpAsBitSet();
+  coverage_data.DumpTrace();
+  if (!common_flags()->coverage_pcs) return;
   uptr size = coverage_data.size();
   InternalMmapVector<u32> offsets(size);
   uptr *vb = coverage_data.data();
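
CovDumpAsBitSet above emits one ASCII '0' or '1' per pc_array slot into a *.bitset-sancov file. As an example of consuming that format, the small standalone reader below counts how many blocks were covered; the tool itself is made up, only the file format comes from the code above.

#include <cstdio>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::fprintf(stderr, "usage: %s file.bitset-sancov\n", argv[0]);
    return 1;
  }
  std::FILE *f = std::fopen(argv[1], "rb");
  if (!f) { std::perror("fopen"); return 1; }
  long total = 0, covered = 0;
  for (int c; (c = std::fgetc(f)) != EOF; ) {
    if (c == '1') covered++;                 // block was executed
    if (c == '0' || c == '1') total++;       // one character per pc_array slot
  }
  std::fclose(f);
  std::printf("%ld of %ld blocks covered\n", covered, total);
  return 0;
}
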
@@ -461,8 +570,8 @@
   SortArray(vb, size);
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   uptr mb, me, off, prot;
-  InternalScopedBuffer<char> module(4096);
-  InternalScopedBuffer<char> path(4096 * 2);
+  InternalScopedString module(kMaxPathLength);
+  InternalScopedString path(kMaxPathLength);
   for (int i = 0;
        proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
        i++) {
@@ -488,9 +597,9 @@
         }
       } else {
         // One file per module per process.
-        internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
-                          common_flags()->coverage_dir, module_name,
-                          internal_getpid());
+        path.clear();
+        path.append("%s/%s.%zd.sancov", coverage_dir, module_name,
+                    internal_getpid());
         int fd = CovOpenFile(false /* packed */, module_name);
         if (fd > 0) {
           internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
@@ -504,13 +613,12 @@
   if (cov_fd >= 0)
     internal_close(cov_fd);
   coverage_data.DumpCallerCalleePairs();
-  coverage_data.DumpTrace();
 #endif  // !SANITIZER_WINDOWS
 }
 
 void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
   if (!args) return;
-  if (!common_flags()->coverage) return;
+  if (!coverage_enabled) return;
   cov_sandboxed = args->coverage_sandboxed;
   if (!cov_sandboxed) return;
   cov_fd = args->coverage_fd;
@@ -522,7 +630,7 @@
 
 int MaybeOpenCovFile(const char *name) {
   CHECK(name);
-  if (!common_flags()->coverage) return -1;
+  if (!coverage_enabled) return -1;
   return CovOpenFile(true /* packed */, name);
 }
 
@@ -534,27 +642,60 @@
   coverage_data.AfterFork(child_pid);
 }
 
+void InitializeCoverage(bool enabled, const char *dir) {
+  if (coverage_enabled)
+    return;  // May happen if two sanitizers enable coverage in the same process.
+  coverage_enabled = enabled;
+  coverage_dir = dir;
+  coverage_data.Init();
+  if (enabled) coverage_data.Enable();
+#if !SANITIZER_WINDOWS
+  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
+#endif
+}
+
+void ReInitializeCoverage(bool enabled, const char *dir) {
+  coverage_enabled = enabled;
+  coverage_dir = dir;
+  coverage_data.ReInit();
+}
+
+void CoverageUpdateMapping() {
+  if (coverage_enabled)
+    CovUpdateMapping(coverage_dir);
+}
+
 }  // namespace __sanitizer
 
 extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
-  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
+  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+                    guard);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
+  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+  if (__sanitizer::atomic_load(atomic_guard, memory_order_relaxed))
+    __sanitizer_cov(guard);
 }
 SANITIZER_INTERFACE_ATTRIBUTE void
 __sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
   coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                           callee, callee_cache16, 16);
 }
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
 SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
+  coverage_enabled = true;
+  coverage_dir = common_flags()->coverage_dir;
   coverage_data.Init();
 }
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
-  if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
-  if (SANITIZER_ANDROID) {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
+SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_cov_module_init(s32 *guards, uptr npcs, const char *module_name) {
+  coverage_data.InitializeGuards(guards, npcs, module_name);
+  if (!common_flags()->coverage_direct) return;
+  if (SANITIZER_ANDROID && coverage_enabled) {
     // dlopen/dlclose interceptors do not work on Android, so we rely on
     // Extend() calls to update .sancov.map.
-    CovUpdateMapping(GET_CALLER_PC());
+    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
   }
   coverage_data.Extend(npcs);
 }
@@ -568,11 +709,23 @@
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_func_enter(uptr *cache) {
-  coverage_data.TraceBasicaBlock(cache);
+void __sanitizer_cov_trace_func_enter(s32 *id) {
+  coverage_data.TraceBasicBlock(id);
 }
 SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_basic_block(uptr *cache) {
-  coverage_data.TraceBasicaBlock(cache);
+void __sanitizer_cov_trace_basic_block(s32 *id) {
+  coverage_data.TraceBasicBlock(id);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_reset_coverage() {
+  coverage_data.ReinitializeGuards();
+  internal_bzero_aligned16(
+      coverage_data.data(),
+      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_coverage_guards(uptr **data) {
+  *data = coverage_data.data();
+  return coverage_data.size();
 }
 }  // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
index dddf2f0..6b5e91f 100644
--- a/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
@@ -62,8 +62,8 @@
 static CachedMapping cached_mapping;
 static StaticSpinMutex mapping_mu;
 
-void CovUpdateMapping(uptr caller_pc) {
-  if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
+  if (!common_flags()->coverage_direct) return;
 
   SpinMutexLock l(&mapping_mu);
 
@@ -71,38 +71,41 @@
     return;
 
   InternalScopedString text(kMaxTextSize);
-  InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
-                                          sizeof(LoadedModule));
-  LoadedModule *modules = (LoadedModule *)modules_data.data();
-  CHECK(modules);
-  int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
-                                   /* filter */ 0);
 
-  text.append("%d\n", sizeof(uptr) * 8);
-  for (int i = 0; i < n_modules; ++i) {
-    const char *module_name = StripModuleName(modules[i].full_name());
-    for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
-      if (modules[i].address_range_executable(j)) {
-        uptr start = modules[i].address_range_start(j);
-        uptr end = modules[i].address_range_end(j);
-        uptr base = modules[i].base_address();
-        text.append("%zx %zx %zx %s\n", start, end, base, module_name);
-        if (caller_pc && caller_pc >= start && caller_pc < end)
-          cached_mapping.SetModuleRange(start, end);
+  {
+    InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
+    CHECK(modules.data());
+    int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
+                                     /* filter */ 0);
+
+    text.append("%d\n", sizeof(uptr) * 8);
+    for (int i = 0; i < n_modules; ++i) {
+      const char *module_name = StripModuleName(modules[i].full_name());
+      uptr base = modules[i].base_address();
+      for (auto iter = modules[i].ranges(); iter.hasNext();) {
+        const auto *range = iter.next();
+        if (range->executable) {
+          uptr start = range->beg;
+          uptr end = range->end;
+          text.append("%zx %zx %zx %s\n", start, end, base, module_name);
+          if (caller_pc && caller_pc >= start && caller_pc < end)
+            cached_mapping.SetModuleRange(start, end);
+        }
       }
+      modules[i].clear();
     }
   }
 
   int err;
-  InternalScopedString tmp_path(64 +
-                                internal_strlen(common_flags()->coverage_dir));
+  InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
   uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
-                    "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
-                    internal_getpid());
+                               "%s/%zd.sancov.map.tmp", coverage_dir,
+                               internal_getpid());
   CHECK_LE(res, tmp_path.size());
   uptr map_fd = OpenFile(tmp_path.data(), true);
-  if (internal_iserror(map_fd)) {
-    Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
+  if (internal_iserror(map_fd, &err)) {
+    Report(" Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
+           err);
     Die();
   }
 
@@ -113,9 +116,9 @@
   }
   internal_close(map_fd);
 
-  InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
+  InternalScopedString path(64 + internal_strlen(coverage_dir));
   res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
-                    common_flags()->coverage_dir, internal_getpid());
+                          coverage_dir, internal_getpid());
   CHECK_LE(res, path.size());
   res = internal_rename(tmp_path.data(), path.data());
   if (internal_iserror(res, &err)) {
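
CovUpdateMapping writes <pid>.sancov.map.tmp and then renames it over <pid>.sancov.map, so readers never observe a half-written mapping file. The same publish-by-rename idiom in standalone form; publish_file, the file name and the sample contents are examples only, though the contents mimic the "word size, then start end base module" lines written above.

#include <cstdio>
#include <cstring>

// Publish a file atomically: write a temporary, then rename() it into place.
// On POSIX, rename() replaces the target atomically within one filesystem.
static bool publish_file(const char *final_path, const char *contents) {
  char tmp_path[256];
  std::snprintf(tmp_path, sizeof(tmp_path), "%s.tmp", final_path);
  std::FILE *f = std::fopen(tmp_path, "w");
  if (!f) return false;
  std::size_t len = std::strlen(contents);
  bool ok = std::fwrite(contents, 1, len, f) == len;
  ok = (std::fclose(f) == 0) && ok;
  if (!ok) { std::remove(tmp_path); return false; }
  return std::rename(tmp_path, final_path) == 0;
}

int main() {
  return publish_file("12345.sancov.map",
                      "64\n400000 401000 400000 example\n") ? 0 : 1;
}
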
diff --git a/lib/sanitizer_common/sanitizer_deadlock_detector.h b/lib/sanitizer_common/sanitizer_deadlock_detector.h
index 90e1cc4..86d5743 100644
--- a/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -50,6 +50,8 @@
     if (epoch_ == current_epoch) return;
     bv_.clear();
     epoch_ = current_epoch;
+    n_recursive_locks = 0;
+    n_all_locks_ = 0;
   }
 
   uptr getEpoch() const { return epoch_; }
@@ -83,7 +85,8 @@
       }
     }
     // Printf("remLock: %zx %zx\n", lock_id, epoch_);
-    CHECK(bv_.clearBit(lock_id));
+    if (!bv_.clearBit(lock_id))
+      return;  // probably addLock happened before flush
     if (n_all_locks_) {
       for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
         if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
@@ -175,6 +178,7 @@
     recycled_nodes_.clear();
     available_nodes_.setAll();
     g_.clear();
+    n_edges_ = 0;
     return getAvailableNode(data);
   }
 
diff --git a/lib/sanitizer_common/sanitizer_flag_parser.cc b/lib/sanitizer_common/sanitizer_flag_parser.cc
new file mode 100644
index 0000000..d125002
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flag_parser.cc
@@ -0,0 +1,153 @@
+//===-- sanitizer_flag_parser.cc ------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flag_parser.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+LowLevelAllocator FlagParser::Alloc;
+
+class UnknownFlags {
+  static const int kMaxUnknownFlags = 20;
+  const char *unknown_flags_[kMaxUnknownFlags];
+  int n_unknown_flags_;
+
+ public:
+  void Add(const char *name) {
+    CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
+    unknown_flags_[n_unknown_flags_++] = name;
+  }
+
+  void Report() {
+    if (!n_unknown_flags_) return;
+    Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
+    for (int i = 0; i < n_unknown_flags_; ++i)
+      Printf("    %s\n", unknown_flags_[i]);
+    n_unknown_flags_ = 0;
+  }
+};
+
+UnknownFlags unknown_flags;
+
+void ReportUnrecognizedFlags() {
+  unknown_flags.Report();
+}
+
+char *FlagParser::ll_strndup(const char *s, uptr n) {
+  uptr len = internal_strnlen(s, n);
+  char *s2 = (char*)Alloc.Allocate(len + 1);
+  internal_memcpy(s2, s, len);
+  s2[len] = 0;
+  return s2;
+}
+
+void FlagParser::PrintFlagDescriptions() {
+  Printf("Available flags for %s:\n", SanitizerToolName);
+  for (int i = 0; i < n_flags_; ++i)
+    Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
+}
+
+void FlagParser::fatal_error(const char *err) {
+  Printf("ERROR: %s\n", err);
+  Die();
+}
+
+bool FlagParser::is_space(char c) {
+  return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
+         c == '\r';
+}
+
+void FlagParser::skip_whitespace() {
+  while (is_space(buf_[pos_])) ++pos_;
+}
+
+void FlagParser::parse_flag() {
+  uptr name_start = pos_;
+  while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
+  if (buf_[pos_] != '=') fatal_error("expected '='");
+  char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
+
+  uptr value_start = ++pos_;
+  char *value;
+  if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
+    char quote = buf_[pos_++];
+    while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
+    if (buf_[pos_] == 0) fatal_error("unterminated string");
+    value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
+    ++pos_; // consume the closing quote
+  } else {
+    while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
+    if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
+      fatal_error("expected separator or eol");
+    value = ll_strndup(buf_ + value_start, pos_ - value_start);
+  }
+
+  bool res = run_handler(name, value);
+  if (!res) fatal_error("Flag parsing failed.");
+}
+
+void FlagParser::parse_flags() {
+  while (true) {
+    skip_whitespace();
+    if (buf_[pos_] == 0) break;
+    parse_flag();
+  }
+
+  // Do a sanity check for certain flags.
+  if (common_flags_dont_use.malloc_context_size < 1)
+    common_flags_dont_use.malloc_context_size = 1;
+}
+
+void FlagParser::ParseString(const char *s) {
+  if (!s) return;
+  // Backup current parser state to allow nested ParseString() calls.
+  const char *old_buf_ = buf_;
+  uptr old_pos_ = pos_;
+  buf_ = s;
+  pos_ = 0;
+
+  parse_flags();
+
+  buf_ = old_buf_;
+  pos_ = old_pos_;
+}
+
+bool FlagParser::run_handler(const char *name, const char *value) {
+  for (int i = 0; i < n_flags_; ++i) {
+    if (internal_strcmp(name, flags_[i].name) == 0)
+      return flags_[i].handler->Parse(value);
+  }
+  // Unrecognized flag. This is not a fatal error; we may print a warning later.
+  unknown_flags.Add(name);
+  return true;
+}
+
+void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
+                                 const char *desc) {
+  CHECK_LT(n_flags_, kMaxFlags);
+  flags_[n_flags_].name = name;
+  flags_[n_flags_].desc = desc;
+  flags_[n_flags_].handler = handler;
+  ++n_flags_;
+}
+
+FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
+  flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+}
+
+}  // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_flag_parser.h b/lib/sanitizer_common/sanitizer_flag_parser.h
new file mode 100644
index 0000000..0ac7634
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -0,0 +1,121 @@
+//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAG_REGISTRY_H
+#define SANITIZER_FLAG_REGISTRY_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class FlagHandlerBase {
+ public:
+  virtual bool Parse(const char *value) { return false; }
+};
+
+template <typename T>
+class FlagHandler : public FlagHandlerBase {
+  T *t_;
+
+ public:
+  explicit FlagHandler(T *t) : t_(t) {}
+  bool Parse(const char *value) final;
+};
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+  if (internal_strcmp(value, "0") == 0 ||
+      internal_strcmp(value, "no") == 0 ||
+      internal_strcmp(value, "false") == 0) {
+    *t_ = false;
+    return true;
+  }
+  if (internal_strcmp(value, "1") == 0 ||
+      internal_strcmp(value, "yes") == 0 ||
+      internal_strcmp(value, "true") == 0) {
+    *t_ = true;
+    return true;
+  }
+  Printf("ERROR: Invalid value for bool option: '%s'\n", value);
+  return false;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Parse(const char *value) {
+  *t_ = internal_strdup(value);
+  return true;
+}
+
+template <>
+inline bool FlagHandler<int>::Parse(const char *value) {
+  char *value_end;
+  *t_ = internal_simple_strtoll(value, &value_end, 10);
+  bool ok = *value_end == 0;
+  if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
+  return ok;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Parse(const char *value) {
+  char *value_end;
+  *t_ = internal_simple_strtoll(value, &value_end, 10);
+  bool ok = *value_end == 0;
+  if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
+  return ok;
+}
+
+class FlagParser {
+  static const int kMaxFlags = 200;
+  struct Flag {
+    const char *name;
+    const char *desc;
+    FlagHandlerBase *handler;
+  } *flags_;
+  int n_flags_;
+
+  const char *buf_;
+  uptr pos_;
+
+ public:
+  FlagParser();
+  void RegisterHandler(const char *name, FlagHandlerBase *handler,
+                       const char *desc);
+  void ParseString(const char *s);
+  void PrintFlagDescriptions();
+
+  static LowLevelAllocator Alloc;
+
+ private:
+  void fatal_error(const char *err);
+  bool is_space(char c);
+  void skip_whitespace();
+  void parse_flags();
+  void parse_flag();
+  bool run_handler(const char *name, const char *value);
+  char *ll_strndup(const char *s, uptr n);
+};
+
+template <typename T>
+static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
+                         T *var) {
+  FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);  // NOLINT
+  parser->RegisterHandler(name, fh, desc);
+}
+
+void ReportUnrecognizedFlags();
+
+}  // namespace __sanitizer
+
+#endif  // SANITIZER_FLAG_REGISTRY_H
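
The FlagParser/FlagHandler machinery above replaces the old per-type ParseFlag calls: a tool registers typed handlers once and then feeds the parser option strings. The sketch below shows that usage against the interface above, assuming it is compiled as part of the runtime; ParseExampleOptions and the verbose/log_path flags are examples, not real sanitizer flags.

#include "sanitizer_flag_parser.h"

namespace __sanitizer {

// Example-only flag variables; real tools keep these in a Flags struct.
static bool example_verbose;
static const char *example_log_path;

void ParseExampleOptions(const char *options) {
  FlagParser parser;
  RegisterFlag(&parser, "verbose", "Enable verbose output.", &example_verbose);
  RegisterFlag(&parser, "log_path", "Where to write the log.", &example_log_path);
  // Accepts e.g. "verbose=1,log_path='/tmp/tool log'"; separators may be
  // spaces, commas, colons or newlines, and quotes protect values.
  parser.ParseString(options);
  ReportUnrecognizedFlags();  // warn about any name=value pairs nobody registered
}

}  // namespace __sanitizer
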
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index 40b6ec0..e835b46 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -16,6 +16,7 @@
 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_list.h"
+#include "sanitizer_flag_parser.h"
 
 namespace __sanitizer {
 
@@ -34,274 +35,53 @@
 # define SANITIZER_NEEDS_SEGV 1
 #endif
 
-void SetCommonFlagsDefaults(CommonFlags *f) {
-  f->symbolize = true;
-  f->external_symbolizer_path = 0;
-  f->allow_addr2line = false;
-  f->strip_path_prefix = "";
-  f->fast_unwind_on_check = false;
-  f->fast_unwind_on_fatal = false;
-  f->fast_unwind_on_malloc = true;
-  f->handle_ioctl = false;
-  f->malloc_context_size = 1;
-  f->log_path = "stderr";
-  f->verbosity = 0;
-  f->detect_leaks = true;
-  f->leak_check_at_exit = true;
-  f->allocator_may_return_null = false;
-  f->print_summary = true;
-  f->check_printf = true;
-  // TODO(glider): tools may want to set different defaults for handle_segv.
-  f->handle_segv = SANITIZER_NEEDS_SEGV;
-  f->allow_user_segv_handler = false;
-  f->use_sigaltstack = true;
-  f->detect_deadlocks = false;
-  f->clear_shadow_mmap_threshold = 64 * 1024;
-  f->color = "auto";
-  f->legacy_pthread_cond = false;
-  f->intercept_tls_get_addr = false;
-  f->coverage = false;
-  f->coverage_direct = SANITIZER_ANDROID;
-  f->coverage_dir = ".";
-  f->full_address_space = false;
-  f->suppressions = "";
-  f->print_suppressions = true;
-  f->disable_coredump = (SANITIZER_WORDSIZE == 64);
-  f->symbolize_inline_frames = true;
-  f->stack_trace_format = "DEFAULT";
+void CommonFlags::SetDefaults() {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
 }
 
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
-  ParseFlag(str, &f->symbolize, "symbolize",
-      "If set, use the online symbolizer from common sanitizer runtime to turn "
-      "virtual addresses to file/line locations.");
-  ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
-      "Path to external symbolizer. If empty, the tool will search $PATH for "
-      "the symbolizer.");
-  ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
-      "If set, allows online symbolizer to run addr2line binary to symbolize "
-      "stack traces (addr2line will only be used if llvm-symbolizer binary is "
-      "unavailable.");
-  ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
-      "Strips this prefix from file paths in error reports.");
-  ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
-      "If available, use the fast frame-pointer-based unwinder on "
-      "internal CHECK failures.");
-  ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
-      "If available, use the fast frame-pointer-based unwinder on fatal "
-      "errors.");
-  ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
-      "If available, use the fast frame-pointer-based unwinder on "
-      "malloc/free.");
-  ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
-      "Intercept and handle ioctl requests.");
-  ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
-      "Max number of stack frames kept for each allocation/deallocation.");
-  ParseFlag(str, &f->log_path, "log_path",
-      "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
-      "\"stderr\". The default is \"stderr\".");
-  ParseFlag(str, &f->verbosity, "verbosity",
-      "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
-  ParseFlag(str, &f->detect_leaks, "detect_leaks",
-      "Enable memory leak detection.");
-  ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
-      "Invoke leak checking in an atexit handler. Has no effect if "
-      "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
-      "handler has a chance to run.");
-  ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
-      "If false, the allocator will crash instead of returning 0 on "
-      "out-of-memory.");
-  ParseFlag(str, &f->print_summary, "print_summary",
-      "If false, disable printing error summaries in addition to error "
-      "reports.");
-  ParseFlag(str, &f->check_printf, "check_printf",
-      "Check printf arguments.");
-  ParseFlag(str, &f->handle_segv, "handle_segv",
-      "If set, registers the tool's custom SEGV handler (both SIGBUS and "
-      "SIGSEGV on OSX).");
-  ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
-      "If set, allows user to register a SEGV handler even if the tool "
-      "registers one.");
-  ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
-      "If set, uses alternate stack for signal handling.");
-  ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
-      "If set, deadlock detection is enabled.");
-  ParseFlag(str, &f->clear_shadow_mmap_threshold,
-            "clear_shadow_mmap_threshold",
-      "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
-      "memset(). This is the threshold size in bytes.");
-  ParseFlag(str, &f->color, "color",
-      "Colorize reports: (always|never|auto).");
-  ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
-      "Enables support for dynamic libraries linked with libpthread 2.2.5.");
-  ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
-            "Intercept __tls_get_addr.");
-  ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
-  ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
-            "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
-            "not a user-facing flag, used mosly for testing the tools");
-  ParseFlag(str, &f->coverage, "coverage",
-      "If set, coverage information will be dumped at program shutdown (if the "
-      "coverage instrumentation was enabled at compile time).");
-  ParseFlag(str, &f->coverage_direct, "coverage_direct",
-            "If set, coverage information will be dumped directly to a memory "
-            "mapped file. This way data is not lost even if the process is "
-            "suddenly killed.");
-  ParseFlag(str, &f->coverage_dir, "coverage_dir",
-            "Target directory for coverage dumps. Defaults to the current "
-            "directory.");
-  ParseFlag(str, &f->full_address_space, "full_address_space",
-            "Sanitize complete address space; "
-            "by default kernel area on 32-bit platforms will not be sanitized");
-  ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
-  ParseFlag(str, &f->print_suppressions, "print_suppressions",
-            "Print matched suppressions at exit.");
-  ParseFlag(str, &f->disable_coredump, "disable_coredump",
-      "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
-      "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
-      "default and for sanitizers that don't reserve lots of virtual memory.");
-  ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
-            "Print inlined frames in stacktraces. Defaults to true.");
-  ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
-            "Format string used to render stack frames. "
-            "See sanitizer_stacktrace_printer.h for the format description. "
-            "Use DEFAULT to get default format.");
-
-  // Do a sanity check for certain flags.
-  if (f->malloc_context_size < 1)
-    f->malloc_context_size = 1;
+void CommonFlags::CopyFrom(const CommonFlags &other) {
+  internal_memcpy(this, &other, sizeof(*this));
 }
 
-static bool GetFlagValue(const char *env, const char *name,
-                         const char **value, int *value_length) {
-  if (env == 0)
-    return false;
-  const char *pos = 0;
-  for (;;) {
-    pos = internal_strstr(env, name);
-    if (pos == 0)
+class FlagHandlerInclude : public FlagHandlerBase {
+  static const uptr kMaxIncludeSize = 1 << 15;
+  FlagParser *parser_;
+
+ public:
+  explicit FlagHandlerInclude(FlagParser *parser) : parser_(parser) {}
+  bool Parse(const char *value) final {
+    char *data;
+    uptr data_mapped_size;
+    int err;
+    uptr len =
+      ReadFileToBuffer(value, &data, &data_mapped_size,
+                       Max(kMaxIncludeSize, GetPageSizeCached()), &err);
+    if (!len) {
+      Printf("Failed to read options from '%s': error %d\n", value, err);
       return false;
-    const char *name_end = pos + internal_strlen(name);
-    if ((pos != env &&
-         ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
-        *name_end != '=') {
-      // Seems to be middle of another flag name or value.
-      env = pos + 1;
-      continue;
     }
-    pos = name_end;
-    break;
+    parser_->ParseString(data);
+    UnmapOrDie(data, data_mapped_size);
+    return true;
   }
-  const char *end;
-  if (pos[0] != '=') {
-    end = pos;
-  } else {
-    pos += 1;
-    if (pos[0] == '"') {
-      pos += 1;
-      end = internal_strchr(pos, '"');
-    } else if (pos[0] == '\'') {
-      pos += 1;
-      end = internal_strchr(pos, '\'');
-    } else {
-      // Read until the next space or colon.
-      end = pos + internal_strcspn(pos, " :");
-    }
-    if (end == 0)
-      end = pos + internal_strlen(pos);
-  }
-  *value = pos;
-  *value_length = end - pos;
-  return true;
+};
+
+void RegisterIncludeFlag(FlagParser *parser, CommonFlags *cf) {
+  FlagHandlerInclude *fh_include =
+      new (FlagParser::Alloc) FlagHandlerInclude(parser);  // NOLINT
+  parser->RegisterHandler("include", fh_include,
+                          "read more options from the given file");
 }
 
-static bool StartsWith(const char *flag, int flag_length, const char *value) {
-  if (!flag || !value)
-    return false;
-  int value_length = internal_strlen(value);
-  return (flag_length >= value_length) &&
-         (0 == internal_strncmp(flag, value, value_length));
-}
+void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &cf->Name);
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
 
-static LowLevelAllocator allocator_for_flags;
-
-// The linear scan is suboptimal, but the number of flags is relatively small.
-bool FlagInDescriptionList(const char *name) {
-  IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
-  while (it.hasNext()) {
-    if (!internal_strcmp(it.next()->name, name)) return true;
-  }
-  return false;
-}
-
-void AddFlagDescription(const char *name, const char *description) {
-  if (FlagInDescriptionList(name)) return;
-  FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
-  new_description->name = name;
-  new_description->description = description;
-  flag_descriptions.push_back(new_description);
-}
-
-// TODO(glider): put the descriptions inside CommonFlags.
-void PrintFlagDescriptions() {
-  IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
-  Printf("Available flags for %s:\n", SanitizerToolName);
-  while (it.hasNext()) {
-    FlagDescription *descr = it.next();
-    Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
-  }
-}
-
-void ParseFlag(const char *env, bool *flag,
-               const char *name, const char *descr) {
-  const char *value;
-  int value_length;
-  AddFlagDescription(name, descr);
-  if (!GetFlagValue(env, name, &value, &value_length))
-    return;
-  if (StartsWith(value, value_length, "0") ||
-      StartsWith(value, value_length, "no") ||
-      StartsWith(value, value_length, "false"))
-    *flag = false;
-  if (StartsWith(value, value_length, "1") ||
-      StartsWith(value, value_length, "yes") ||
-      StartsWith(value, value_length, "true"))
-    *flag = true;
-}
-
-void ParseFlag(const char *env, int *flag,
-               const char *name, const char *descr) {
-  const char *value;
-  int value_length;
-  AddFlagDescription(name, descr);
-  if (!GetFlagValue(env, name, &value, &value_length))
-    return;
-  *flag = static_cast<int>(internal_atoll(value));
-}
-
-void ParseFlag(const char *env, uptr *flag,
-               const char *name, const char *descr) {
-  const char *value;
-  int value_length;
-  AddFlagDescription(name, descr);
-  if (!GetFlagValue(env, name, &value, &value_length))
-    return;
-  *flag = static_cast<uptr>(internal_atoll(value));
-}
-
-void ParseFlag(const char *env, const char **flag,
-               const char *name, const char *descr) {
-  const char *value;
-  int value_length;
-  AddFlagDescription(name, descr);
-  if (!GetFlagValue(env, name, &value, &value_length))
-    return;
-  // Copy the flag value. Don't use locks here, as flags are parsed at
-  // tool startup.
-  char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
-  internal_memcpy(value_copy, value, value_length);
-  value_copy[value_length] = '\0';
-  *flag = value_copy;
+  RegisterIncludeFlag(parser, cf);
 }
 
 }  // namespace __sanitizer
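
The rewritten flags code leans on the X-macro idiom: sanitizer_flags.inc lists every flag once as COMMON_FLAG(Type, Name, DefaultValue, Description), and each consumer (the struct members, SetDefaults, RegisterCommonFlags) defines COMMON_FLAG to expand the way it needs before including the list. The self-contained miniature below shows the same idiom with a made-up two-flag macro list in place of a separate .inc file.

#include <cstdio>

// Stand-in for sanitizer_flags.inc: the single source of truth for the flags.
#define MY_FLAGS(X)                                         \
  X(bool, verbose, false, "Enable verbose output.")         \
  X(int, max_items, 10, "Maximum number of items to process.")

struct MyFlags {
  // Expansion 1: declare one member per flag.
#define DECLARE(Type, Name, Default, Desc) Type Name;
  MY_FLAGS(DECLARE)
#undef DECLARE

  void SetDefaults() {
    // Expansion 2: assign every default with one statement per flag.
#define SET_DEFAULT(Type, Name, Default, Desc) Name = Default;
    MY_FLAGS(SET_DEFAULT)
#undef SET_DEFAULT
  }

  void PrintDescriptions() const {
    // Expansion 3: reuse the same list for help text.
#define PRINT(Type, Name, Default, Desc) std::printf("  %-10s %s\n", #Name, Desc);
    MY_FLAGS(PRINT)
#undef PRINT
  }
};

int main() {
  MyFlags f;
  f.SetDefaults();
  f.PrintDescriptions();
  std::printf("verbose=%d max_items=%d\n", f.verbose, f.max_items);
  return 0;
}

The point of the idiom is that adding or renaming a flag touches only the single list; every consumer picks the change up automatically.
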
diff --git a/lib/sanitizer_common/sanitizer_flags.h b/lib/sanitizer_common/sanitizer_flags.h
index 4791397..fda6d71 100644
--- a/lib/sanitizer_common/sanitizer_flags.h
+++ b/lib/sanitizer_common/sanitizer_flags.h
@@ -18,62 +18,38 @@
 
 namespace __sanitizer {
 
-void ParseFlag(const char *env, bool *flag,
-    const char *name, const char *descr);
-void ParseFlag(const char *env, int *flag,
-    const char *name, const char *descr);
-void ParseFlag(const char *env, uptr *flag,
-    const char *name, const char *descr);
-void ParseFlag(const char *env, const char **flag,
-    const char *name, const char *descr);
-
 struct CommonFlags {
-  bool symbolize;
-  const char *external_symbolizer_path;
-  bool allow_addr2line;
-  const char *strip_path_prefix;
-  bool fast_unwind_on_check;
-  bool fast_unwind_on_fatal;
-  bool fast_unwind_on_malloc;
-  bool handle_ioctl;
-  int malloc_context_size;
-  const char *log_path;
-  int  verbosity;
-  bool detect_leaks;
-  bool leak_check_at_exit;
-  bool allocator_may_return_null;
-  bool print_summary;
-  bool check_printf;
-  bool handle_segv;
-  bool allow_user_segv_handler;
-  bool use_sigaltstack;
-  bool detect_deadlocks;
-  uptr clear_shadow_mmap_threshold;
-  const char *color;
-  bool legacy_pthread_cond;
-  bool intercept_tls_get_addr;
-  bool help;
-  uptr mmap_limit_mb;
-  bool coverage;
-  bool coverage_direct;
-  const char *coverage_dir;
-  bool full_address_space;
-  const char *suppressions;
-  bool print_suppressions;
-  bool disable_coredump;
-  bool symbolize_inline_frames;
-  const char *stack_trace_format;
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+  void SetDefaults();
+  void CopyFrom(const CommonFlags &other);
 };
 
-inline CommonFlags *common_flags() {
-  extern CommonFlags common_flags_dont_use;
+// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
+extern CommonFlags common_flags_dont_use;
+inline const CommonFlags *common_flags() {
   return &common_flags_dont_use;
 }
 
-void SetCommonFlagsDefaults(CommonFlags *f);
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
-void PrintFlagDescriptions();
+inline void SetCommonFlagsDefaults() {
+  common_flags_dont_use.SetDefaults();
+}
 
+// This function can only be used to setup tool-specific overrides for
+// CommonFlags defaults. Generally, it should only be used right after
+// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
+// only during the flags initialization (i.e. before they are used for
+// the first time).
+inline void OverrideCommonFlags(const CommonFlags &cf) {
+  common_flags_dont_use.CopyFrom(cf);
+}
+