update to webrtc revision 1349

Updated audio processing modules from revision 180 to 1349.
Main changes are:
- code cleanup and reformatting
- source path reorganization
- improved performance

Also imported test code that was not included in the initial
drop from webrtc.

Change-Id: Ie4eb0e29990052e5f2d7f0b271b42eead40dbb6a
diff --git a/Android.mk b/Android.mk
index aa90dd3..dc214e9 100644
--- a/Android.mk
+++ b/Android.mk
@@ -8,23 +8,60 @@
 
 MY_WEBRTC_ROOT_PATH := $(call my-dir)
 
-
+# voice
+include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/resampler/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/signal_processing/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/vad/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/aec/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/aecm/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/agc/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/ns/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/utility/Android.mk
+#include $(MY_WEBRTC_ROOT_PATH)/src/modules/utility/source/Android.mk
 include $(MY_WEBRTC_ROOT_PATH)/src/system_wrappers/source/Android.mk
 
-# audio processing
-include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/resampler/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/signal_processing_library/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/common_audio/vad/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/aec/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/aecm/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/agc/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/ns/main/source/Android.mk
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/utility/Android.mk
-
-
 # build .so
-include $(MY_WEBRTC_ROOT_PATH)/android-webrtc.mk
+LOCAL_PATH := $(call my-dir)
 
-# build test apps
-include $(MY_WEBRTC_ROOT_PATH)/src/modules/audio_processing/main/test/process_test/Android.mk
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/../../external/webrtc/android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc_audio_preprocessing
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+    libwebrtc_spl \
+    libwebrtc_resampler \
+    libwebrtc_apm \
+    libwebrtc_apm_utility \
+    libwebrtc_vad \
+    libwebrtc_ns \
+    libwebrtc_agc \
+    libwebrtc_aec \
+    libwebrtc_aecm \
+    libwebrtc_system_wrappers
+
+# Add Neon libraries.
+ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
+LOCAL_WHOLE_STATIC_LIBRARIES += \
+    libwebrtc_aecm_neon \
+    libwebrtc_ns_neon
+endif
+
+LOCAL_STATIC_LIBRARIES := \
+    libprotobuf-cpp-2.3.0-lite
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+LOCAL_PRELINK_MODULE := false
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/NOTICE b/NOTICE
index 1c39e5b..f96164a 100644
--- a/NOTICE
+++ b/NOTICE
@@ -108,4 +108,59 @@
 Scott McMurray
 */
 
+===============================================================================
 
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+===============================================================================
+
+//  (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
+//  Copyright (c) 2001, 2002 Peter Dimov
+//
+//  Permission to copy, use, modify, sell and distribute this software
+//  is granted provided this copyright notice appears in all copies.
+//  This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+//
+//  See http://www.boost.org/libs/smart_ptr/scoped_ptr.htm for documentation.
+//
+
+//  scoped_ptr mimics a built-in pointer except that it guarantees deletion
+//  of the object pointed to, either on destruction of the scoped_ptr or via
+//  an explicit reset(). scoped_ptr is a simple solution for simple needs;
+//  use shared_ptr or std::auto_ptr if your needs are more complex.
+
+//  scoped_ptr_malloc added in by Google.  When one of
+//  these goes out of scope, instead of doing a delete or delete[], it
+//  calls free().  scoped_ptr_malloc<char> is likely to see much more
+//  use than any other specializations.
+
+//  release() added in by Google. Use this to conditionally
+//  transfer ownership of a heap-allocated object to the caller, usually on
+//  method success.
diff --git a/android-webrtc.mk b/android-webrtc.mk
index 01d6a9e..dc92aeb 100644
--- a/android-webrtc.mk
+++ b/android-webrtc.mk
@@ -6,91 +6,44 @@
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-MY_APM_WHOLE_STATIC_LIBRARIES := \
-    libwebrtc_spl \
-    libwebrtc_resampler \
-    libwebrtc_apm \
-    libwebrtc_apm_utility \
-    libwebrtc_vad \
-    libwebrtc_ns \
-    libwebrtc_agc \
-    libwebrtc_aec \
-    libwebrtc_aecm
+# These defines will apply to all source files
+# Think again before changing it
+MY_WEBRTC_COMMON_DEFS := \
+    '-DWEBRTC_TARGET_PC' \
+    '-DWEBRTC_LINUX' \
+    '-DWEBRTC_THREAD_RR' \
+    '-DWEBRTC_CLOCK_TYPE_REALTIME' \
+    '-DWEBRTC_ANDROID'
+#    The following macros are used by modules,
+#    we might need to re-organize them
+#    '-DWEBRTC_ANDROID_OPENSLES' [module audio_device]
+#    '-DNETEQ_VOICEENGINE_CODECS' [module audio_coding neteq]
+#    '-DWEBRTC_MODULE_UTILITY_VIDEO' [module media_file] [module utility]
+ifeq ($(TARGET_ARCH),arm)
+MY_WEBRTC_COMMON_DEFS += \
+    '-DWEBRTC_ARCH_ARM'
+#    '-DWEBRTC_DETECT_ARM_NEON' # only used in a build configuration without Neon
+# TODO(kma): figure out if the above define could be moved to NDK build only.
 
-LOCAL_PATH := $(call my-dir)
+# TODO(kma): test if the code under next two macros works with generic GCC compilers
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+MY_WEBRTC_COMMON_DEFS += \
+    '-DWEBRTC_ARCH_ARM_NEON'
+MY_ARM_CFLAGS_NEON := \
+    -flax-vector-conversions
+endif
 
-include $(CLEAR_VARS)
+ifneq (,$(filter '-DWEBRTC_DETECT_ARM_NEON' '-DWEBRTC_ARCH_ARM_NEON', \
+    $(MY_WEBRTC_COMMON_DEFS)))
+WEBRTC_BUILD_NEON_LIBS := true
+endif
 
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE := libwebrtc_audio_preprocessing
-LOCAL_MODULE_TAGS := optional
-LOCAL_LDFLAGS :=
+ifeq ($(ARCH_ARM_HAVE_ARMV7A),true)
+MY_WEBRTC_COMMON_DEFS += \
+    '-DWEBRTC_ARCH_ARM_V7A'
+endif
 
-LOCAL_WHOLE_STATIC_LIBRARIES := \
-    $(MY_APM_WHOLE_STATIC_LIBRARIES) \
-    libwebrtc_system_wrappers \
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils \
-    libdl \
-    libstlport
-
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_SHARED_LIBRARY)
-
-###
-
-#LOCAL_PATH := $(call my-dir)
-#
-#include $(CLEAR_VARS)
-#
-#LOCAL_ARM_MODE := arm
-#LOCAL_MODULE := libwebrtc
-#LOCAL_MODULE_TAGS := optional
-#LOCAL_LDFLAGS :=
-#
-#LOCAL_WHOLE_STATIC_LIBRARIES := \
-#    libwebrtc_system_wrappers \
-#    libwebrtc_audio_device \
-#    libwebrtc_pcm16b \
-#    libwebrtc_cng \
-#    libwebrtc_audio_coding \
-#    libwebrtc_rtp_rtcp \
-#    libwebrtc_media_file \
-#    libwebrtc_udp_transport \
-#    libwebrtc_utility \
-#    libwebrtc_neteq \
-#    libwebrtc_audio_conference_mixer \
-#    libwebrtc_isac \
-#    libwebrtc_ilbc \
-#    libwebrtc_isacfix \
-#    libwebrtc_g722 \
-#    libwebrtc_g711 \
-#    libwebrtc_voe_core \
-#    libwebrtc_video_render \
-#    libwebrtc_video_capture \
-#    libwebrtc_i420 \
-#    libwebrtc_video_coding \
-#    libwebrtc_video_processing \
-#    libwebrtc_vp8 \
-#    libwebrtc_vie_core \
-#    libwebrtc_vplib \
-#    libwebrtc_jpeg \
-#    libwebrtc_vpx
-#
-#LOCAL_STATIC_LIBRARIES :=
-#LOCAL_SHARED_LIBRARIES := \
-#    libcutils \
-#    libdl \
-#    libstlport \
-#    libjpeg \
-#    libGLESv2 \
-#    libOpenSLES \
-#    libwebrtc_audio_preprocessing
-#
-#LOCAL_ADDITIONAL_DEPENDENCIES :=
-#
-#include external/stlport/libstlport.mk
-#include $(BUILD_SHARED_LIBRARY)
+else ifeq ($(TARGET_ARCH),x86)
+MY_WEBRTC_COMMON_DEFS += \
+    '-DWEBRTC_USE_SSE2'
+endif
diff --git a/src/LICENSE b/src/LICENSE
new file mode 100644
index 0000000..4c41b7b
--- /dev/null
+++ b/src/LICENSE
@@ -0,0 +1,29 @@
+Copyright (c) 2011, The WebRTC project authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+  * Neither the name of Google nor the names of its contributors may
+    be used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/PATENTS b/src/PATENTS
new file mode 100644
index 0000000..190607a
--- /dev/null
+++ b/src/PATENTS
@@ -0,0 +1,24 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the WebRTC code package.
+
+Google hereby grants to you a perpetual, worldwide, non-exclusive,
+no-charge, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import,
+transfer, and otherwise run, modify and propagate the contents of this
+implementation of the WebRTC code package, where such license applies
+only to those patent claims, both currently owned by Google and
+acquired in the future, licensable by Google that are necessarily
+infringed by this implementation of the WebRTC code package. This
+grant does not include claims that would be infringed only as a
+consequence of further modification of this implementation. If you or
+your agent or exclusive licensee institute or order or agree to the
+institution of patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that this
+implementation of the WebRTC code package or any code incorporated
+within this implementation of the WebRTC code package constitutes
+direct or contributory patent infringement, or inducement of patent
+infringement, then any patent rights granted to you under this License
+for this implementation of the WebRTC code package shall terminate as
+of the date such litigation is filed.
diff --git a/src/common_audio/OWNERS b/src/common_audio/OWNERS
index 0eb967b..84582f2 100644
--- a/src/common_audio/OWNERS
+++ b/src/common_audio/OWNERS
@@ -1 +1,4 @@
-bjornv@google.com
+bjornv@webrtc.org
+tina.legrand@webrtc.org
+jan.skoglund@webrtc.org
+andrew@webrtc.org
diff --git a/src/common_audio/common_audio.gyp b/src/common_audio/common_audio.gyp
new file mode 100644
index 0000000..3d3da3f
--- /dev/null
+++ b/src/common_audio/common_audio.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'includes': [
+    '../build/common.gypi',
+    'signal_processing/signal_processing.gypi',
+    'resampler/resampler.gypi',
+    'vad/vad.gypi',
+  ],
+}
diff --git a/src/common_audio/resampler/Android.mk b/src/common_audio/resampler/Android.mk
new file mode 100644
index 0000000..b1d630a
--- /dev/null
+++ b/src/common_audio/resampler/Android.mk
@@ -0,0 +1,47 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_resampler
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES := resampler.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/include \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../signal_processing/include 
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
+LOCAL_LDLIBS += -ldl -lpthread
+endif
+
+ifneq ($(TARGET_SIMULATOR),true)
+LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/common_audio/resampler/OWNERS b/src/common_audio/resampler/OWNERS
deleted file mode 100644
index cf595df..0000000
--- a/src/common_audio/resampler/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-bjornv@google.com
-tlegrand@google.com
-jks@google.com
diff --git a/src/common_audio/resampler/main/interface/resampler.h b/src/common_audio/resampler/include/resampler.h
similarity index 90%
rename from src/common_audio/resampler/main/interface/resampler.h
rename to src/common_audio/resampler/include/resampler.h
index a03ff18..38e6bd3 100644
--- a/src/common_audio/resampler/main/interface/resampler.h
+++ b/src/common_audio/resampler/include/resampler.h
@@ -21,6 +21,8 @@
 namespace webrtc
 {
 
+// TODO(andrew): the implementation depends on the exact values of this enum.
+// It should be rewritten in a less fragile way.
 enum ResamplerType
 {
     // 4 MSB = Number of channels
@@ -33,6 +35,7 @@
     kResamplerInvalid = 0xff
 };
 
+// TODO(andrew): doesn't need to be part of the interface.
 enum ResamplerMode
 {
     kResamplerMode1To1,
@@ -40,6 +43,7 @@
     kResamplerMode1To3,
     kResamplerMode1To4,
     kResamplerMode1To6,
+    kResamplerMode1To12,
     kResamplerMode2To3,
     kResamplerMode2To11,
     kResamplerMode4To11,
@@ -50,6 +54,7 @@
     kResamplerMode3To1,
     kResamplerMode4To1,
     kResamplerMode6To1,
+    kResamplerMode12To1,
     kResamplerMode3To2,
     kResamplerMode11To2,
     kResamplerMode11To4,
@@ -61,6 +66,7 @@
 
 public:
     Resampler();
+    // TODO(andrew): use an init function instead.
     Resampler(int inFreq, int outFreq, ResamplerType type);
     ~Resampler();
 
diff --git a/src/common_audio/resampler/main/source/Android.mk b/src/common_audio/resampler/main/source/Android.mk
deleted file mode 100644
index b78e3af..0000000
--- a/src/common_audio/resampler/main/source/Android.mk
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_resampler
-LOCAL_MODULE_TAGS := optional
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := resampler.cc
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS := 
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
-LOCAL_LDLIBS += -ldl -lpthread
-endif
-
-ifneq ($(TARGET_SIMULATOR),true)
-LOCAL_SHARED_LIBRARIES += libdl
-endif
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/common_audio/resampler/main/source/resampler.gyp b/src/common_audio/resampler/main/source/resampler.gyp
deleted file mode 100644
index 8baf870..0000000
--- a/src/common_audio/resampler/main/source/resampler.gyp
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
-  'includes': [
-    '../../../../common_settings.gypi', # Common settings
-  ],
-  'targets': [
-    {
-      'target_name': 'resampler',
-      'type': '<(library)',
-      'dependencies': [
-        '../../../signal_processing_library/main/source/spl.gyp:spl',
-      ],
-      'include_dirs': [
-        '../interface',
-      ],
-      'direct_dependent_settings': {
-        'include_dirs': [
-          '../interface',
-        ],
-      },
-      'sources': [
-        '../interface/resampler.h',
-        'resampler.cc',
-      ],
-    },
-  ],
-}
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/common_audio/resampler/main/source/resampler.cc b/src/common_audio/resampler/resampler.cc
similarity index 87%
rename from src/common_audio/resampler/main/source/resampler.cc
rename to src/common_audio/resampler/resampler.cc
index f866739..2db27b1 100644
--- a/src/common_audio/resampler/main/source/resampler.cc
+++ b/src/common_audio/resampler/resampler.cc
@@ -62,8 +62,7 @@
     slave_left_ = NULL;
     slave_right_ = NULL;
 
-    int res = Reset(inFreq, outFreq, type);
-
+    Reset(inFreq, outFreq, type);
 }
 
 Resampler::~Resampler()
@@ -185,7 +184,8 @@
     if ((my_type_ & 0xf0) == 0x20)
     {
         // Change type to mono
-        type = (ResamplerType)((int)type & 0x0f + 0x10);
+        type = static_cast<ResamplerType>(
+            ((static_cast<int>(type) & 0x0f) + 0x10));
         slave_left_ = new Resampler(inFreq, outFreq, type);
         slave_right_ = new Resampler(inFreq, outFreq, type);
     }
@@ -209,9 +209,12 @@
             case 6:
                 my_mode_ = kResamplerMode1To6;
                 break;
+            case 12:
+                my_mode_ = kResamplerMode1To12;
+                break;
             default:
                 my_type_ = kResamplerInvalid;
-                break;
+                return -1;
         }
     } else if (outFreq == 1)
     {
@@ -229,9 +232,12 @@
             case 6:
                 my_mode_ = kResamplerMode6To1;
                 break;
+            case 12:
+                my_mode_ = kResamplerMode12To1;
+                break;
             default:
                 my_type_ = kResamplerInvalid;
-                break;
+                return -1;
         }
     } else if ((inFreq == 2) && (outFreq == 3))
     {
@@ -299,6 +305,18 @@
             state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
             WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
             break;
+        case kResamplerMode1To12:
+            // 1:2
+            state1_ = malloc(8 * sizeof(WebRtc_Word32));
+            memset(state1_, 0, 8 * sizeof(WebRtc_Word32));
+            // 2:4
+            state2_ = malloc(8 * sizeof(WebRtc_Word32));
+            memset(state2_, 0, 8 * sizeof(WebRtc_Word32));
+            // 4:12
+            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
+            WebRtcSpl_ResetResample16khzTo48khz(
+                (WebRtcSpl_State16khzTo48khz*) state3_);
+            break;
         case kResamplerMode2To3:
             // 2:6
             state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
@@ -367,6 +385,18 @@
             state2_ = malloc(8 * sizeof(WebRtc_Word32));
             memset(state2_, 0, 8 * sizeof(WebRtc_Word32));
             break;
+        case kResamplerMode12To1:
+            // 12:4
+            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
+            WebRtcSpl_ResetResample48khzTo16khz(
+                (WebRtcSpl_State48khzTo16khz*) state1_);
+            // 4:2
+            state2_ = malloc(8 * sizeof(WebRtc_Word32));
+            memset(state2_, 0, 8 * sizeof(WebRtc_Word32));
+            // 2:1
+            state3_ = malloc(8 * sizeof(WebRtc_Word32));
+            memset(state3_, 0, 8 * sizeof(WebRtc_Word32));
+            break;
         case kResamplerMode3To2:
             // 3:6
             state1_ = malloc(8 * sizeof(WebRtc_Word32));
@@ -458,8 +488,9 @@
         return 0;
     }
 
-    // Container for temp samples
+    // Containers for temp samples
     WebRtc_Word16* tmp;
+    WebRtc_Word16* tmp_2;
     // tmp data for resampling routines
     WebRtc_Word32* tmp_mem;
 
@@ -545,6 +576,41 @@
             free(tmp);
 
             return 0;
+        case kResamplerMode1To12:
+            // We can only handle blocks of 40 samples
+            // Can be fixed, but I don't think it's needed
+            if ((lengthIn % 40) != 0) {
+              return -1;
+            }
+            if (maxLen < (lengthIn * 12)) {
+              return -1;
+            }
+
+            tmp_mem = (WebRtc_Word32*) malloc(336 * sizeof(WebRtc_Word32));
+            tmp = (WebRtc_Word16*) malloc(sizeof(WebRtc_Word16) * 4 * lengthIn);
+            //1:2
+            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
+                                  (WebRtc_Word32*) state1_);
+            outLen = lengthIn * 2;
+            //2:4
+            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (WebRtc_Word32*) state2_);
+            outLen = outLen * 2;
+            // 4:12
+            for (int i = 0; i < outLen; i += 160) {
+              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
+              // as input and outputs a resampled block of 480 samples. The
+              // data is now actually in 32 kHz sampling rate, despite the
+              // function name, and with a resampling factor of three becomes
+              // 96 kHz.
+              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
+                                             (WebRtcSpl_State16khzTo48khz*) state3_,
+                                             tmp_mem);
+            }
+            outLen = outLen * 3;
+            free(tmp_mem);
+            free(tmp);
+
+            return 0;
         case kResamplerMode2To3:
             if (maxLen < (lengthIn * 3 / 2))
             {
@@ -783,6 +849,43 @@
             free(tmp);
             outLen = outLen / 2;
             return 0;
+        case kResamplerMode12To1:
+            // We can only handle blocks of 480 samples
+            // Can be fixed, but I don't think it's needed
+            if ((lengthIn % 480) != 0) {
+              return -1;
+            }
+            if (maxLen < (lengthIn / 12)) {
+              return -1;
+            }
+
+            tmp_mem = (WebRtc_Word32*) malloc(496 * sizeof(WebRtc_Word32));
+            tmp = (WebRtc_Word16*) malloc((sizeof(WebRtc_Word16) * lengthIn) / 3);
+            tmp_2 = (WebRtc_Word16*) malloc((sizeof(WebRtc_Word16) * lengthIn) / 6);
+            // 12:4
+            for (int i = 0; i < lengthIn; i += 480) {
+              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
+              // as input and outputs a resampled block of 160 samples. The
+              // data is now actually in 96 kHz sampling rate, despite the
+              // function name, and with a resampling factor of 1/3 becomes
+              // 32 kHz.
+              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
+                                             (WebRtcSpl_State48khzTo16khz*) state1_,
+                                             tmp_mem);
+            }
+            outLen = lengthIn / 3;
+            free(tmp_mem);
+            // 4:2
+            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
+                                    (WebRtc_Word32*) state2_);
+            outLen = outLen / 2;
+            free(tmp);
+            // 2:1
+            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
+                                    (WebRtc_Word32*) state3_);
+            free(tmp_2);
+            outLen = outLen / 2;
+            return 0;
         case kResamplerMode3To2:
             if (maxLen < (lengthIn * 2 / 3))
             {
diff --git a/src/common_audio/resampler/resampler.gypi b/src/common_audio/resampler/resampler.gypi
new file mode 100644
index 0000000..69f9b0e
--- /dev/null
+++ b/src/common_audio/resampler/resampler.gypi
@@ -0,0 +1,55 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'targets': [
+    {
+      'target_name': 'resampler',
+      'type': '<(library)',
+      'dependencies': [
+        'signal_processing',
+      ],
+      'include_dirs': [
+        'include',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          'include',
+        ],
+      },
+      'sources': [
+        'include/resampler.h',
+        'resampler.cc',
+      ],
+    },
+  ], # targets
+  'conditions': [
+    ['build_with_chromium==0', {
+      'targets' : [
+        {
+          'target_name': 'resampler_unittests',
+          'type': 'executable',
+          'dependencies': [
+            'resampler',
+            '<(webrtc_root)/../test/test.gyp:test_support_main',
+            '<(webrtc_root)/../testing/gtest.gyp:gtest',
+          ],
+          'sources': [
+            'resampler_unittest.cc',
+          ],
+        }, # resampler_unittests
+      ], # targets
+    }], # build_with_chromium
+  ], # conditions
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/common_audio/resampler/resampler_unittest.cc b/src/common_audio/resampler/resampler_unittest.cc
new file mode 100644
index 0000000..9b1061a
--- /dev/null
+++ b/src/common_audio/resampler/resampler_unittest.cc
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gtest/gtest.h"
+
+#include "common_audio/resampler/include/resampler.h"
+
+// TODO(andrew): this is a work-in-progress. Many more tests are needed.
+
+namespace webrtc {
+namespace {
+const ResamplerType kTypes[] = {
+  kResamplerSynchronous,
+  kResamplerAsynchronous,
+  kResamplerSynchronousStereo,
+  kResamplerAsynchronousStereo
+  // kResamplerInvalid excluded
+};
+const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
+
+// Rates we must support.
+const int kMaxRate = 96000;
+const int kRates[] = {
+  8000,
+  16000,
+  32000,
+  44000,
+  48000,
+  kMaxRate
+};
+const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
+const int kMaxChannels = 2;
+const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
+
+// TODO(andrew): should we be supporting these combinations?
+bool ValidRates(int in_rate, int out_rate) {
+  // Not the most compact notation, for clarity.
+  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
+      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
+    return false;
+  }
+
+  return true;
+}
+
+class ResamplerTest : public testing::Test {
+ protected:
+  ResamplerTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  Resampler rs_;
+  int16_t data_in_[kDataSize];
+  int16_t data_out_[kDataSize];
+};
+
+ResamplerTest::ResamplerTest() {}
+
+void ResamplerTest::SetUp() {
+  // Initialize input data with anything. The tests are content independent.
+  memset(data_in_, 1, sizeof(data_in_));
+}
+
+void ResamplerTest::TearDown() {}
+
+TEST_F(ResamplerTest, Reset) {
+  // The only failure mode for the constructor is if Reset() fails. For the
+  // time being then (until an Init function is added), we rely on Reset()
+  // to test the constructor.
+
+  // Check that all required combinations are supported.
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      for (size_t k = 0; k < kTypesSize; ++k) {
+        std::ostringstream ss;
+        ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
+            << ", type: " << kTypes[k];
+        SCOPED_TRACE(ss.str());
+        if (ValidRates(kRates[i], kRates[j]))
+          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+        else
+          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+      }
+    }
+  }
+}
+
+// TODO(tlegrand): Replace code inside the two tests below with a function
+// with number of channels and ResamplerType as input.
+TEST_F(ResamplerTest, Synchronous) {
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      std::ostringstream ss;
+      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
+      SCOPED_TRACE(ss.str());
+
+      if (ValidRates(kRates[i], kRates[j])) {
+        int in_length = kRates[i] / 100;
+        int out_length = 0;
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
+        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                              out_length));
+        EXPECT_EQ(kRates[j] / 100, out_length);
+      } else {
+        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
+      }
+    }
+  }
+}
+
+TEST_F(ResamplerTest, SynchronousStereo) {
+  // Number of channels is 2, stereo mode.
+  const int kChannels = 2;
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      std::ostringstream ss;
+      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
+      SCOPED_TRACE(ss.str());
+
+      if (ValidRates(kRates[i], kRates[j])) {
+        int in_length = kChannels * kRates[i] / 100;
+        int out_length = 0;
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
+                               kResamplerSynchronousStereo));
+        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                              out_length));
+        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
+      } else {
+        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
+                                kResamplerSynchronousStereo));
+      }
+    }
+  }
+}
+}  // namespace
+}  // namespace webrtc
diff --git a/src/common_audio/signal_processing_library/main/source/Android.mk b/src/common_audio/signal_processing/Android.mk
similarity index 66%
rename from src/common_audio/signal_processing_library/main/source/Android.mk
rename to src/common_audio/signal_processing/Android.mk
index 401a7f6..787e5c1 100644
--- a/src/common_audio/signal_processing_library/main/source/Android.mk
+++ b/src/common_audio/signal_processing/Android.mk
@@ -10,20 +10,18 @@
 
 include $(CLEAR_VARS)
 
+include $(LOCAL_PATH)/../../../android-webrtc.mk
+
 LOCAL_ARM_MODE := arm
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := libwebrtc_spl
 LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := add_sat_w16.c \
-    add_sat_w32.c \
+LOCAL_SRC_FILES := \
     auto_corr_to_refl_coef.c \
     auto_correlation.c \
     complex_fft.c \
-    complex_ifft.c \
     complex_bit_reverse.c \
     copy_set_operations.c \
-    cos_table.c \
     cross_correlation.c \
     division_operations.c \
     dot_product_with_scale.c \
@@ -34,16 +32,10 @@
     filter_ma_fast_q12.c \
     get_hanning_window.c \
     get_scaling_square.c \
-    get_size_in_bits.c \
-    hanning_table.c \
     ilbc_specific_functions.c \
     levinson_durbin.c \
     lpc_to_refl_coef.c \
     min_max_operations.c \
-    norm_u32.c \
-    norm_w16.c \
-    norm_w32.c \
-    randn_table.c \
     randomization_functions.c \
     refl_coef_to_lpc.c \
     resample.c \
@@ -51,45 +43,30 @@
     resample_by_2.c \
     resample_by_2_internal.c \
     resample_fractional.c \
-    sin_table.c \
-    sin_table_1024.c \
     spl_sqrt.c \
+    spl_sqrt_floor.c \
     spl_version.c \
     splitting_filter.c \
     sqrt_of_one_minus_x_squared.c \
-    sub_sat_w16.c \
-    sub_sat_w32.c \
     vector_scaling_operations.c
 
 # Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' 
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/include \
+    $(LOCAL_PATH)/../.. 
+
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
+LOCAL_SRC_FILES += \
+    min_max_operations_neon.c
+LOCAL_CFLAGS += \
+    $(MY_ARM_CFLAGS_NEON)
 endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
-    $(LOCAL_PATH)/../interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
 
 LOCAL_SHARED_LIBRARIES := libstlport
 
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
 ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
 LOCAL_LDLIBS += -ldl -lpthread
 endif
@@ -98,5 +75,7 @@
 LOCAL_SHARED_LIBRARIES += libdl
 endif
 
+ifndef NDK_ROOT
 include external/stlport/libstlport.mk
+endif
 include $(BUILD_STATIC_LIBRARY)
diff --git a/src/common_audio/signal_processing_library/main/source/auto_corr_to_refl_coef.c b/src/common_audio/signal_processing/auto_corr_to_refl_coef.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/auto_corr_to_refl_coef.c
rename to src/common_audio/signal_processing/auto_corr_to_refl_coef.c
diff --git a/src/common_audio/signal_processing_library/main/source/auto_correlation.c b/src/common_audio/signal_processing/auto_correlation.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/auto_correlation.c
rename to src/common_audio/signal_processing/auto_correlation.c
diff --git a/src/common_audio/signal_processing_library/main/source/complex_bit_reverse.c b/src/common_audio/signal_processing/complex_bit_reverse.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/complex_bit_reverse.c
rename to src/common_audio/signal_processing/complex_bit_reverse.c
diff --git a/src/common_audio/signal_processing/complex_fft.c b/src/common_audio/signal_processing/complex_fft.c
new file mode 100644
index 0000000..1e8503c
--- /dev/null
+++ b/src/common_audio/signal_processing/complex_fft.c
@@ -0,0 +1,425 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_ComplexFFT().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "signal_processing_library.h"
+
+#define CFFTSFT 14
+#define CFFTRND 1
+#define CFFTRND2 16384
+
+#define CIFFTSFT 14
+#define CIFFTRND 1
+
+static const WebRtc_Word16 kSinTable1024[] = {
+      0,    201,    402,    603,    804,   1005,   1206,   1406,
+   1607,   1808,   2009,   2209,   2410,   2610,   2811,   3011,
+   3211,   3411,   3611,   3811,   4011,   4210,   4409,   4608,
+   4807,   5006,   5205,   5403,   5601,   5799,   5997,   6195,
+   6392,   6589,   6786,   6982,   7179,   7375,   7571,   7766,
+   7961,   8156,   8351,   8545,   8739,   8932,   9126,   9319,
+   9511,   9703,   9895,  10087,  10278,  10469,  10659,  10849,
+  11038,  11227,  11416,  11604,  11792,  11980,  12166,  12353,
+  12539,  12724,  12909,  13094,  13278,  13462,  13645,  13827,
+  14009,  14191,  14372,  14552,  14732,  14911,  15090,  15268,
+  15446,  15623,  15799,  15975,  16150,  16325,  16499,  16672,
+  16845,  17017,  17189,  17360,  17530,  17699,  17868,  18036,
+  18204,  18371,  18537,  18702,  18867,  19031,  19194,  19357,
+  19519,  19680,  19840,  20000,  20159,  20317,  20474,  20631,
+  20787,  20942,  21096,  21249,  21402,  21554,  21705,  21855,
+  22004,  22153,  22301,  22448,  22594,  22739,  22883,  23027,
+  23169,  23311,  23452,  23592,  23731,  23869,  24006,  24143,
+  24278,  24413,  24546,  24679,  24811,  24942,  25072,  25201,
+  25329,  25456,  25582,  25707,  25831,  25954,  26077,  26198,
+  26318,  26437,  26556,  26673,  26789,  26905,  27019,  27132,
+  27244,  27355,  27466,  27575,  27683,  27790,  27896,  28001,
+  28105,  28208,  28309,  28410,  28510,  28608,  28706,  28802,
+  28897,  28992,  29085,  29177,  29268,  29358,  29446,  29534,
+  29621,  29706,  29790,  29873,  29955,  30036,  30116,  30195,
+  30272,  30349,  30424,  30498,  30571,  30643,  30713,  30783,
+  30851,  30918,  30984,  31049,
+  31113,  31175,  31236,  31297,
+  31356,  31413,  31470,  31525,  31580,  31633,  31684,  31735,
+  31785,  31833,  31880,  31926,  31970,  32014,  32056,  32097,
+  32137,  32176,  32213,  32249,  32284,  32318,  32350,  32382,
+  32412,  32441,  32468,  32495,  32520,  32544,  32567,  32588,
+  32609,  32628,  32646,  32662,  32678,  32692,  32705,  32717,
+  32727,  32736,  32744,  32751,  32757,  32761,  32764,  32766,
+  32767,  32766,  32764,  32761,  32757,  32751,  32744,  32736,
+  32727,  32717,  32705,  32692,  32678,  32662,  32646,  32628,
+  32609,  32588,  32567,  32544,  32520,  32495,  32468,  32441,
+  32412,  32382,  32350,  32318,  32284,  32249,  32213,  32176,
+  32137,  32097,  32056,  32014,  31970,  31926,  31880,  31833,
+  31785,  31735,  31684,  31633,  31580,  31525,  31470,  31413,
+  31356,  31297,  31236,  31175,  31113,  31049,  30984,  30918,
+  30851,  30783,  30713,  30643,  30571,  30498,  30424,  30349,
+  30272,  30195,  30116,  30036,  29955,  29873,  29790,  29706,
+  29621,  29534,  29446,  29358,  29268,  29177,  29085,  28992,
+  28897,  28802,  28706,  28608,  28510,  28410,  28309,  28208,
+  28105,  28001,  27896,  27790,  27683,  27575,  27466,  27355,
+  27244,  27132,  27019,  26905,  26789,  26673,  26556,  26437,
+  26318,  26198,  26077,  25954,  25831,  25707,  25582,  25456,
+  25329,  25201,  25072,  24942,  24811,  24679,  24546,  24413,
+  24278,  24143,  24006,  23869,  23731,  23592,  23452,  23311,
+  23169,  23027,  22883,  22739,  22594,  22448,  22301,  22153,
+  22004,  21855,  21705,  21554,  21402,  21249,  21096,  20942,
+  20787,  20631,  20474,  20317,  20159,  20000,  19840,  19680,
+  19519,  19357,  19194,  19031,  18867,  18702,  18537,  18371,
+  18204,  18036,  17868,  17699,  17530,  17360,  17189,  17017,
+  16845,  16672,  16499,  16325,  16150,  15975,  15799,  15623,
+  15446,  15268,  15090,  14911,  14732,  14552,  14372,  14191,
+  14009,  13827,  13645,  13462,  13278,  13094,  12909,  12724,
+  12539,  12353,  12166,  11980,  11792,  11604,  11416,  11227,
+  11038,  10849,  10659,  10469,  10278,  10087,   9895,   9703,
+   9511,   9319,   9126,   8932,   8739,   8545,   8351,   8156,
+   7961,   7766,   7571,   7375,   7179,   6982,   6786,   6589,
+   6392,   6195,   5997,   5799,   5601,   5403,   5205,   5006,
+   4807,   4608,   4409,   4210,   4011,   3811,   3611,   3411,
+   3211,   3011,   2811,   2610,   2410,   2209,   2009,   1808,
+   1607,   1406,   1206,   1005,    804,    603,    402,    201,
+      0,   -201,   -402,   -603,   -804,  -1005,  -1206,  -1406,
+  -1607,  -1808,  -2009,  -2209,  -2410,  -2610,  -2811,  -3011,
+  -3211,  -3411,  -3611,  -3811,  -4011,  -4210,  -4409,  -4608,
+  -4807,  -5006,  -5205,  -5403,  -5601,  -5799,  -5997,  -6195,
+  -6392,  -6589,  -6786,  -6982,  -7179,  -7375,  -7571,  -7766,
+  -7961,  -8156,  -8351,  -8545,  -8739,  -8932,  -9126,  -9319,
+  -9511,  -9703,  -9895, -10087, -10278, -10469, -10659, -10849,
+ -11038, -11227, -11416, -11604, -11792, -11980, -12166, -12353,
+ -12539, -12724, -12909, -13094, -13278, -13462, -13645, -13827,
+ -14009, -14191, -14372, -14552, -14732, -14911, -15090, -15268,
+ -15446, -15623, -15799, -15975, -16150, -16325, -16499, -16672,
+ -16845, -17017, -17189, -17360, -17530, -17699, -17868, -18036,
+ -18204, -18371, -18537, -18702, -18867, -19031, -19194, -19357,
+ -19519, -19680, -19840, -20000, -20159, -20317, -20474, -20631,
+ -20787, -20942, -21096, -21249, -21402, -21554, -21705, -21855,
+ -22004, -22153, -22301, -22448, -22594, -22739, -22883, -23027,
+ -23169, -23311, -23452, -23592, -23731, -23869, -24006, -24143,
+ -24278, -24413, -24546, -24679, -24811, -24942, -25072, -25201,
+ -25329, -25456, -25582, -25707, -25831, -25954, -26077, -26198,
+ -26318, -26437, -26556, -26673, -26789, -26905, -27019, -27132,
+ -27244, -27355, -27466, -27575, -27683, -27790, -27896, -28001,
+ -28105, -28208, -28309, -28410, -28510, -28608, -28706, -28802,
+ -28897, -28992, -29085, -29177, -29268, -29358, -29446, -29534,
+ -29621, -29706, -29790, -29873, -29955, -30036, -30116, -30195,
+ -30272, -30349, -30424, -30498, -30571, -30643, -30713, -30783,
+ -30851, -30918, -30984, -31049, -31113, -31175, -31236, -31297,
+ -31356, -31413, -31470, -31525, -31580, -31633, -31684, -31735,
+ -31785, -31833, -31880, -31926, -31970, -32014, -32056, -32097,
+ -32137, -32176, -32213, -32249, -32284, -32318, -32350, -32382,
+ -32412, -32441, -32468, -32495, -32520, -32544, -32567, -32588,
+ -32609, -32628, -32646, -32662, -32678, -32692, -32705, -32717,
+ -32727, -32736, -32744, -32751, -32757, -32761, -32764, -32766,
+ -32767, -32766, -32764, -32761, -32757, -32751, -32744, -32736,
+ -32727, -32717, -32705, -32692, -32678, -32662, -32646, -32628,
+ -32609, -32588, -32567, -32544, -32520, -32495, -32468, -32441,
+ -32412, -32382, -32350, -32318, -32284, -32249, -32213, -32176,
+ -32137, -32097, -32056, -32014, -31970, -31926, -31880, -31833,
+ -31785, -31735, -31684, -31633, -31580, -31525, -31470, -31413,
+ -31356, -31297, -31236, -31175, -31113, -31049, -30984, -30918,
+ -30851, -30783, -30713, -30643, -30571, -30498, -30424, -30349,
+ -30272, -30195, -30116, -30036, -29955, -29873, -29790, -29706,
+ -29621, -29534, -29446, -29358, -29268, -29177, -29085, -28992,
+ -28897, -28802, -28706, -28608, -28510, -28410, -28309, -28208,
+ -28105, -28001, -27896, -27790, -27683, -27575, -27466, -27355,
+ -27244, -27132, -27019, -26905, -26789, -26673, -26556, -26437,
+ -26318, -26198, -26077, -25954, -25831, -25707, -25582, -25456,
+ -25329, -25201, -25072, -24942, -24811, -24679, -24546, -24413,
+ -24278, -24143, -24006, -23869, -23731, -23592, -23452, -23311,
+ -23169, -23027, -22883, -22739, -22594, -22448, -22301, -22153,
+ -22004, -21855, -21705, -21554, -21402, -21249, -21096, -20942,
+ -20787, -20631, -20474, -20317, -20159, -20000, -19840, -19680,
+ -19519, -19357, -19194, -19031, -18867, -18702, -18537, -18371,
+ -18204, -18036, -17868, -17699, -17530, -17360, -17189, -17017,
+ -16845, -16672, -16499, -16325, -16150, -15975, -15799, -15623,
+ -15446, -15268, -15090, -14911, -14732, -14552, -14372, -14191,
+ -14009, -13827, -13645, -13462, -13278, -13094, -12909, -12724,
+ -12539, -12353, -12166, -11980, -11792, -11604, -11416, -11227,
+ -11038, -10849, -10659, -10469, -10278, -10087,  -9895,  -9703,
+  -9511,  -9319,  -9126,  -8932,  -8739,  -8545,  -8351,  -8156,
+  -7961,  -7766,  -7571,  -7375,  -7179,  -6982,  -6786,  -6589,
+  -6392,  -6195,  -5997,  -5799,  -5601,  -5403,  -5205,  -5006,
+  -4807,  -4608,  -4409,  -4210,  -4011,  -3811,  -3611,  -3411,
+  -3211,  -3011,  -2811,  -2610,  -2410,  -2209,  -2009,  -1808,
+  -1607,  -1406,  -1206,  -1005,   -804,   -603,   -402,   -201
+};
+
+int WebRtcSpl_ComplexFFT(WebRtc_Word16 frfi[], int stages, int mode)
+{
+    int i, j, l, k, istep, n, m;
+    WebRtc_Word16 wr, wi;
+    WebRtc_Word32 tr32, ti32, qr32, qi32;
+
+    /* The 1024-value is a constant given from the size of kSinTable1024[],
+     * and should not be changed depending on the input parameter 'stages'
+     */
+    n = 1 << stages;
+    if (n > 1024)
+        return -1;
+
+    l = 1;
+    k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change
+         depending on the input parameter 'stages' */
+
+    if (mode == 0)
+    {
+        // mode==0: Low-complexity and Low-accuracy mode
+        while (l < n)
+        {
+            istep = l << 1;
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = -kSinTable1024[j];
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j])
+                            - WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j + 1])), 15);
+
+                    ti32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j + 1])
+                            + WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j])), 15);
+
+                    qr32 = (WebRtc_Word32)frfi[2 * i];
+                    qi32 = (WebRtc_Word32)frfi[2 * i + 1];
+                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 - tr32, 1);
+                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 - ti32, 1);
+                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 + tr32, 1);
+                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 + ti32, 1);
+                }
+            }
+
+            --k;
+            l = istep;
+
+        }
+
+    } else
+    {
+        // mode==1: High-complexity and High-accuracy mode
+        while (l < n)
+        {
+            istep = l << 1;
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = -kSinTable1024[j];
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+                WebRtc_Word32 wri;
+                WebRtc_Word32 frfi_r;
+                __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) :
+                    "r"((WebRtc_Word32)wr), "r"((WebRtc_Word32)wi));
+#endif
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+                    __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(frfi_r) :
+                        "r"((WebRtc_Word32)frfi[2*j]), "r"((WebRtc_Word32)frfi[2*j +1]));
+                    __asm__("smlsd %0, %1, %2, %3" : "=r"(tr32) :
+                        "r"(wri), "r"(frfi_r), "r"(CFFTRND));
+                    __asm__("smladx %0, %1, %2, %3" : "=r"(ti32) :
+                        "r"(wri), "r"(frfi_r), "r"(CFFTRND));
+    
+#else
+                    tr32 = WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j])
+                            - WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j + 1]) + CFFTRND;
+
+                    ti32 = WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j + 1])
+                            + WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j]) + CFFTRND;
+#endif
+
+                    tr32 = WEBRTC_SPL_RSHIFT_W32(tr32, 15 - CFFTSFT);
+                    ti32 = WEBRTC_SPL_RSHIFT_W32(ti32, 15 - CFFTSFT);
+
+                    qr32 = ((WebRtc_Word32)frfi[2 * i]) << CFFTSFT;
+                    qi32 = ((WebRtc_Word32)frfi[2 * i + 1]) << CFFTSFT;
+
+                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qr32 - tr32 + CFFTRND2), 1 + CFFTSFT);
+                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qi32 - ti32 + CFFTRND2), 1 + CFFTSFT);
+                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qr32 + tr32 + CFFTRND2), 1 + CFFTSFT);
+                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qi32 + ti32 + CFFTRND2), 1 + CFFTSFT);
+                }
+            }
+
+            --k;
+            l = istep;
+        }
+    }
+    return 0;
+}
+
+int WebRtcSpl_ComplexIFFT(WebRtc_Word16 frfi[], int stages, int mode)
+{
+    int i, j, l, k, istep, n, m, scale, shift;
+    WebRtc_Word16 wr, wi;
+    WebRtc_Word32 tr32, ti32, qr32, qi32;
+    WebRtc_Word32 tmp32, round2;
+
+    /* The 1024-value is a constant given from the size of kSinTable1024[],
+     * and should not be changed depending on the input parameter 'stages'
+     */
+    n = 1 << stages;
+    if (n > 1024)
+        return -1;
+
+    scale = 0;
+
+    l = 1;
+    k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change
+         depending on the input parameter 'stages' */
+
+    while (l < n)
+    {
+        // variable scaling, depending upon data
+        shift = 0;
+        round2 = 8192;
+
+        tmp32 = (WebRtc_Word32)WebRtcSpl_MaxAbsValueW16(frfi, 2 * n);
+        if (tmp32 > 13573)
+        {
+            shift++;
+            scale++;
+            round2 <<= 1;
+        }
+        if (tmp32 > 27146)
+        {
+            shift++;
+            scale++;
+            round2 <<= 1;
+        }
+
+        istep = l << 1;
+
+        if (mode == 0)
+        {
+            // mode==0: Low-complexity and Low-accuracy mode
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = kSinTable1024[j];
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j], 0)
+                            - WEBRTC_SPL_MUL_16_16_RSFT(wi, frfi[2 * j + 1], 0)), 15);
+
+                    ti32 = WEBRTC_SPL_RSHIFT_W32(
+                            (WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j + 1], 0)
+                                    + WEBRTC_SPL_MUL_16_16_RSFT(wi,frfi[2*j],0)), 15);
+
+                    qr32 = (WebRtc_Word32)frfi[2 * i];
+                    qi32 = (WebRtc_Word32)frfi[2 * i + 1];
+                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 - tr32, shift);
+                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 - ti32, shift);
+                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 + tr32, shift);
+                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 + ti32, shift);
+                }
+            }
+        } else
+        {
+            // mode==1: High-complexity and High-accuracy mode
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = kSinTable1024[j];
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+                WebRtc_Word32 wri;
+                WebRtc_Word32 frfi_r;
+                __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) :
+                    "r"((WebRtc_Word32)wr), "r"((WebRtc_Word32)wi));
+#endif
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+                    __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(frfi_r) :
+                        "r"((WebRtc_Word32)frfi[2*j]), "r"((WebRtc_Word32)frfi[2*j +1]));
+                    __asm__("smlsd %0, %1, %2, %3" : "=r"(tr32) :
+                        "r"(wri), "r"(frfi_r), "r"(CIFFTRND));
+                    __asm__("smladx %0, %1, %2, %3" : "=r"(ti32) :
+                        "r"(wri), "r"(frfi_r), "r"(CIFFTRND));
+#else
+
+                    tr32 = WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j])
+                            - WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j + 1]) + CIFFTRND;
+
+                    ti32 = WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j + 1])
+                            + WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j]) + CIFFTRND;
+#endif
+                    tr32 = WEBRTC_SPL_RSHIFT_W32(tr32, 15 - CIFFTSFT);
+                    ti32 = WEBRTC_SPL_RSHIFT_W32(ti32, 15 - CIFFTSFT);
+
+                    qr32 = ((WebRtc_Word32)frfi[2 * i]) << CIFFTSFT;
+                    qi32 = ((WebRtc_Word32)frfi[2 * i + 1]) << CIFFTSFT;
+
+                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((qr32 - tr32+round2),
+                                                                       shift+CIFFTSFT);
+                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qi32 - ti32 + round2), shift + CIFFTSFT);
+                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((qr32 + tr32 + round2),
+                                                                       shift + CIFFTSFT);
+                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
+                            (qi32 + ti32 + round2), shift + CIFFTSFT);
+                }
+            }
+
+        }
+        --k;
+        l = istep;
+    }
+    return scale;
+}
diff --git a/src/common_audio/signal_processing_library/main/source/copy_set_operations.c b/src/common_audio/signal_processing/copy_set_operations.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/copy_set_operations.c
rename to src/common_audio/signal_processing/copy_set_operations.c
diff --git a/src/common_audio/signal_processing_library/main/source/cross_correlation.c b/src/common_audio/signal_processing/cross_correlation.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/cross_correlation.c
rename to src/common_audio/signal_processing/cross_correlation.c
diff --git a/src/common_audio/signal_processing_library/main/source/division_operations.c b/src/common_audio/signal_processing/division_operations.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/division_operations.c
rename to src/common_audio/signal_processing/division_operations.c
diff --git a/src/common_audio/signal_processing_library/main/source/dot_product_with_scale.c b/src/common_audio/signal_processing/dot_product_with_scale.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/dot_product_with_scale.c
rename to src/common_audio/signal_processing/dot_product_with_scale.c
diff --git a/src/common_audio/signal_processing_library/main/source/downsample_fast.c b/src/common_audio/signal_processing/downsample_fast.c
similarity index 95%
rename from src/common_audio/signal_processing_library/main/source/downsample_fast.c
rename to src/common_audio/signal_processing/downsample_fast.c
index 9338275..cce463c 100644
--- a/src/common_audio/signal_processing_library/main/source/downsample_fast.c
+++ b/src/common_audio/signal_processing/downsample_fast.c
@@ -52,7 +52,7 @@
 
         // If output is higher than 32768, saturate it. Same with negative side
 
-        *downsampled_ptr++ = (WebRtc_Word16)WEBRTC_SPL_SAT(32767, o, -32768);
+        *downsampled_ptr++ = WebRtcSpl_SatW32ToW16(o);
     }
 
     return 0;
diff --git a/src/common_audio/signal_processing_library/main/source/energy.c b/src/common_audio/signal_processing/energy.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/energy.c
rename to src/common_audio/signal_processing/energy.c
diff --git a/src/common_audio/signal_processing_library/main/source/filter_ar.c b/src/common_audio/signal_processing/filter_ar.c
similarity index 96%
rename from src/common_audio/signal_processing_library/main/source/filter_ar.c
rename to src/common_audio/signal_processing/filter_ar.c
index 30a56c1..24e83a6 100644
--- a/src/common_audio/signal_processing_library/main/source/filter_ar.c
+++ b/src/common_audio/signal_processing/filter_ar.c
@@ -36,9 +36,6 @@
     WebRtc_Word16* filteredFINAL_ptr = filtered;
     WebRtc_Word16* filteredFINAL_LOW_ptr = filtered_low;
 
-    state_low_length = state_low_length;
-    filtered_low_length = filtered_low_length;
-
     for (i = 0; i < x_length; i++)
     {
         // Calculate filtered[i] and filtered_low[i]
diff --git a/src/common_audio/signal_processing_library/main/source/filter_ar_fast_q12.c b/src/common_audio/signal_processing/filter_ar_fast_q12.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/filter_ar_fast_q12.c
rename to src/common_audio/signal_processing/filter_ar_fast_q12.c
diff --git a/src/common_audio/signal_processing_library/main/source/filter_ma_fast_q12.c b/src/common_audio/signal_processing/filter_ma_fast_q12.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/filter_ma_fast_q12.c
rename to src/common_audio/signal_processing/filter_ma_fast_q12.c
diff --git a/src/common_audio/signal_processing_library/main/source/hanning_table.c b/src/common_audio/signal_processing/get_hanning_window.c
similarity index 78%
rename from src/common_audio/signal_processing_library/main/source/hanning_table.c
rename to src/common_audio/signal_processing/get_hanning_window.c
index 112d0e5..6d67e60 100644
--- a/src/common_audio/signal_processing_library/main/source/hanning_table.c
+++ b/src/common_audio/signal_processing/get_hanning_window.c
@@ -10,14 +10,15 @@
 
 
 /*
- * This file contains the Hanning table with 256 entries.
+ * This file contains the function WebRtcSpl_GetHanningWindow().
+ * The description header can be found in signal_processing_library.h
  *
  */
 
 #include "signal_processing_library.h"
 
 // Hanning table with 256 entries
-WebRtc_Word16 WebRtcSpl_kHanningTable[] = {
+static const WebRtc_Word16 kHanningTable[] = {
     1,      2,      6,     10,     15,     22,     30,     39,
    50,     62,     75,     89,    104,    121,    138,    157,
   178,    199,    222,    246,    271,    297,    324,    353,
@@ -51,3 +52,26 @@
 16246,  16263,  16280,  16295,  16309,  16322,  16334,  16345,
 16354,  16362,  16369,  16374,  16378,  16382,  16383,  16384
 };
+
+void WebRtcSpl_GetHanningWindow(WebRtc_Word16 *v, WebRtc_Word16 size)
+{
+    int jj;
+    WebRtc_Word16 *vptr1;
+
+    WebRtc_Word32 index;
+    WebRtc_Word32 factor = ((WebRtc_Word32)0x40000000);
+
+    factor = WebRtcSpl_DivW32W16(factor, size);
+    if (size < 513)
+        index = (WebRtc_Word32)-0x200000;
+    else
+        index = (WebRtc_Word32)-0x100000;
+    vptr1 = v;
+
+    for (jj = 0; jj < size; jj++)
+    {
+        index += factor;
+        (*vptr1++) = kHanningTable[index >> 22];
+    }
+
+}
diff --git a/src/common_audio/signal_processing_library/main/source/get_scaling_square.c b/src/common_audio/signal_processing/get_scaling_square.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/get_scaling_square.c
rename to src/common_audio/signal_processing/get_scaling_square.c
diff --git a/src/common_audio/signal_processing_library/main/source/ilbc_specific_functions.c b/src/common_audio/signal_processing/ilbc_specific_functions.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/ilbc_specific_functions.c
rename to src/common_audio/signal_processing/ilbc_specific_functions.c
diff --git a/src/common_audio/signal_processing_library/main/interface/signal_processing_library.h b/src/common_audio/signal_processing/include/signal_processing_library.h
similarity index 85%
rename from src/common_audio/signal_processing_library/main/interface/signal_processing_library.h
rename to src/common_audio/signal_processing/include/signal_processing_library.h
index 414e045..651a68c 100644
--- a/src/common_audio/signal_processing_library/main/interface/signal_processing_library.h
+++ b/src/common_audio/signal_processing/include/signal_processing_library.h
@@ -25,11 +25,6 @@
 #include <Armintr.h> // intrinsic file for windows mobile
 #endif
 
-#ifdef WEBRTC_ANDROID
-#define WEBRTC_SPL_INLINE_CALLS
-#define SPL_NO_DOUBLE_IMPLEMENTATIONS
-#endif
-
 // Macros specific for the fixed point implementation
 #define WEBRTC_SPL_WORD16_MAX       32767
 #define WEBRTC_SPL_WORD16_MIN       -32768
@@ -39,100 +34,97 @@
 #define WEBRTC_SPL_MAX_SEED_USED    0x80000000L
 #define WEBRTC_SPL_MIN(A, B)        (A < B ? A : B) // Get min value
 #define WEBRTC_SPL_MAX(A, B)        (A > B ? A : B) // Get max value
-#define WEBRTC_SPL_ABS_W16(a)\
+#define WEBRTC_SPL_ABS_W16(a) \
     (((WebRtc_Word16)a >= 0) ? ((WebRtc_Word16)a) : -((WebRtc_Word16)a))
-#define WEBRTC_SPL_ABS_W32(a)\
+#define WEBRTC_SPL_ABS_W32(a) \
     (((WebRtc_Word32)a >= 0) ? ((WebRtc_Word32)a) : -((WebRtc_Word32)a))
 
 #if (defined WEBRTC_TARGET_PC)||(defined __TARGET_XSCALE)
 #define WEBRTC_SPL_GET_BYTE(a, nr)  (((WebRtc_Word8 *)a)[nr])
-#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index)  \
-  (((WebRtc_Word8 *)d_ptr)[index] = (val))
+#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index) \
+    (((WebRtc_Word8 *)d_ptr)[index] = (val))
 #elif defined WEBRTC_BIG_ENDIAN
-#define WEBRTC_SPL_GET_BYTE(a, nr)\
+#define WEBRTC_SPL_GET_BYTE(a, nr) \
     ((((WebRtc_Word16 *)a)[nr >> 1]) >> (((nr + 1) & 0x1) * 8) & 0x00ff)
-#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index)                          \
-  ((WebRtc_Word16 *)d_ptr)[index >> 1] = \
-      ((((WebRtc_Word16 *)d_ptr)[index >> 1])                           \
-       & (0x00ff << (8 * ((index) & 0x1)))) | (val << (8 * ((index + 1) & 0x1)))
+#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index) \
+    ((WebRtc_Word16 *)d_ptr)[index >> 1] = \
+    ((((WebRtc_Word16 *)d_ptr)[index >> 1]) \
+    & (0x00ff << (8 * ((index) & 0x1)))) | (val << (8 * ((index + 1) & 0x1)))
 #else
-#define WEBRTC_SPL_GET_BYTE(a,nr)                                       \
-  ((((WebRtc_Word16 *)(a))[(nr) >> 1]) >> (((nr) & 0x1) * 8) & 0x00ff)
-#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index)                          \
-  ((WebRtc_Word16 *)(d_ptr))[(index) >> 1] = \
-      ((((WebRtc_Word16 *)(d_ptr))[(index) >> 1])                       \
-       & (0x00ff << (8 * (((index) + 1) & 0x1)))) |                     \
-      ((val) << (8 * ((index) & 0x1)))
+#define WEBRTC_SPL_GET_BYTE(a,nr) \
+    ((((WebRtc_Word16 *)(a))[(nr) >> 1]) >> (((nr) & 0x1) * 8) & 0x00ff)
+#define WEBRTC_SPL_SET_BYTE(d_ptr, val, index) \
+    ((WebRtc_Word16 *)(d_ptr))[(index) >> 1] = \
+    ((((WebRtc_Word16 *)(d_ptr))[(index) >> 1]) \
+    & (0x00ff << (8 * (((index) + 1) & 0x1)))) | \
+    ((val) << (8 * ((index) & 0x1)))
 #endif
 
-#ifndef WEBRTC_ANDROID
-#define WEBRTC_SPL_MUL(a, b)                                    \
-  ((WebRtc_Word32) ((WebRtc_Word32)(a) * (WebRtc_Word32)(b)))
-#endif
-
-#define WEBRTC_SPL_UMUL(a, b)                                           \
-  ((WebRtc_UWord32) ((WebRtc_UWord32)(a) * (WebRtc_UWord32)(b)))
-#define WEBRTC_SPL_UMUL_RSFT16(a, b)\
+#define WEBRTC_SPL_MUL(a, b) \
+    ((WebRtc_Word32) ((WebRtc_Word32)(a) * (WebRtc_Word32)(b)))
+#define WEBRTC_SPL_UMUL(a, b) \
+    ((WebRtc_UWord32) ((WebRtc_UWord32)(a) * (WebRtc_UWord32)(b)))
+#define WEBRTC_SPL_UMUL_RSFT16(a, b) \
     ((WebRtc_UWord32) ((WebRtc_UWord32)(a) * (WebRtc_UWord32)(b)) >> 16)
-#define WEBRTC_SPL_UMUL_16_16(a, b)\
+#define WEBRTC_SPL_UMUL_16_16(a, b) \
     ((WebRtc_UWord32) (WebRtc_UWord16)(a) * (WebRtc_UWord16)(b))
-#define WEBRTC_SPL_UMUL_16_16_RSFT16(a, b)\
+#define WEBRTC_SPL_UMUL_16_16_RSFT16(a, b) \
     (((WebRtc_UWord32) (WebRtc_UWord16)(a) * (WebRtc_UWord16)(b)) >> 16)
-#define WEBRTC_SPL_UMUL_32_16(a, b)\
+#define WEBRTC_SPL_UMUL_32_16(a, b) \
     ((WebRtc_UWord32) ((WebRtc_UWord32)(a) * (WebRtc_UWord16)(b)))
-#define WEBRTC_SPL_UMUL_32_16_RSFT16(a, b)\
+#define WEBRTC_SPL_UMUL_32_16_RSFT16(a, b) \
     ((WebRtc_UWord32) ((WebRtc_UWord32)(a) * (WebRtc_UWord16)(b)) >> 16)
-#define WEBRTC_SPL_MUL_16_U16(a, b)\
+#define WEBRTC_SPL_MUL_16_U16(a, b) \
     ((WebRtc_Word32)(WebRtc_Word16)(a) * (WebRtc_UWord16)(b))
-#define WEBRTC_SPL_DIV(a, b)    \
-  ((WebRtc_Word32) ((WebRtc_Word32)(a) / (WebRtc_Word32)(b)))
-#define WEBRTC_SPL_UDIV(a, b)   \
-  ((WebRtc_UWord32) ((WebRtc_UWord32)(a) / (WebRtc_UWord32)(b)))
+#define WEBRTC_SPL_DIV(a, b) \
+    ((WebRtc_Word32) ((WebRtc_Word32)(a) / (WebRtc_Word32)(b)))
+#define WEBRTC_SPL_UDIV(a, b) \
+    ((WebRtc_UWord32) ((WebRtc_UWord32)(a) / (WebRtc_UWord32)(b)))
 
-#define WEBRTC_SPL_MUL_16_32_RSFT11(a, b)\
-  ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 5)                            \
-     + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x0200) >> 10))
-#define WEBRTC_SPL_MUL_16_32_RSFT14(a, b)\
-  ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 2)                            \
-   + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x1000) >> 13))
-#define WEBRTC_SPL_MUL_16_32_RSFT15(a, b)                               \
-  ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 1)                            \
-   + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x2000) >> 14))
-
-#ifndef WEBRTC_ANDROID
-#define WEBRTC_SPL_MUL_16_32_RSFT16(a, b)                               \
-  (WEBRTC_SPL_MUL_16_16(a, b >> 16)                                     \
-   + ((WEBRTC_SPL_MUL_16_16(a, (b & 0xffff) >> 1) + 0x4000) >> 15))
-#define WEBRTC_SPL_MUL_32_32_RSFT32(a32a, a32b, b32)                    \
-  ((WebRtc_Word32)(WEBRTC_SPL_MUL_16_32_RSFT16(a32a, b32)               \
-                   + (WEBRTC_SPL_MUL_16_32_RSFT16(a32b, b32) >> 16)))
-#define WEBRTC_SPL_MUL_32_32_RSFT32BI(a32, b32)                         \
-  ((WebRtc_Word32)(WEBRTC_SPL_MUL_16_32_RSFT16((                        \
-      (WebRtc_Word16)(a32 >> 16)), b32) +                               \
-                   (WEBRTC_SPL_MUL_16_32_RSFT16((                       \
-                       (WebRtc_Word16)((a32 & 0x0000FFFF) >> 1)), b32) >> 15)))
+#ifndef WEBRTC_ARCH_ARM_V7A
+// For ARMv7 platforms, these are inline functions in spl_inl_armv7.h
+#define WEBRTC_SPL_MUL_16_16(a, b) \
+    ((WebRtc_Word32) (((WebRtc_Word16)(a)) * ((WebRtc_Word16)(b))))
+#define WEBRTC_SPL_MUL_16_32_RSFT16(a, b) \
+    (WEBRTC_SPL_MUL_16_16(a, b >> 16) \
+     + ((WEBRTC_SPL_MUL_16_16(a, (b & 0xffff) >> 1) + 0x4000) >> 15))
+#define WEBRTC_SPL_MUL_32_32_RSFT32(a32a, a32b, b32) \
+    ((WebRtc_Word32)(WEBRTC_SPL_MUL_16_32_RSFT16(a32a, b32) \
+    + (WEBRTC_SPL_MUL_16_32_RSFT16(a32b, b32) >> 16)))
+#define WEBRTC_SPL_MUL_32_32_RSFT32BI(a32, b32) \
+    ((WebRtc_Word32)(WEBRTC_SPL_MUL_16_32_RSFT16(( \
+    (WebRtc_Word16)(a32 >> 16)), b32) + \
+    (WEBRTC_SPL_MUL_16_32_RSFT16(( \
+    (WebRtc_Word16)((a32 & 0x0000FFFF) >> 1)), b32) >> 15)))
 #endif
 
+#define WEBRTC_SPL_MUL_16_32_RSFT11(a, b) \
+    ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 5) \
+    + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x0200) >> 10))
+#define WEBRTC_SPL_MUL_16_32_RSFT14(a, b) \
+    ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 2) \
+    + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x1000) >> 13))
+#define WEBRTC_SPL_MUL_16_32_RSFT15(a, b) \
+    ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) << 1) \
+    + (((WEBRTC_SPL_MUL_16_U16(a, (WebRtc_UWord16)(b)) >> 1) + 0x2000) >> 14))
+
 #ifdef ARM_WINM
-#define WEBRTC_SPL_MUL_16_16(a, b)                      \
-  _SmulLo_SW_SL((WebRtc_Word16)(a), (WebRtc_Word16)(b))
-#elif !defined (WEBRTC_ANDROID)
-#define WEBRTC_SPL_MUL_16_16(a, b)                                      \
-    ((WebRtc_Word32) (((WebRtc_Word16)(a)) * ((WebRtc_Word16)(b))))
+#define WEBRTC_SPL_MUL_16_16(a, b) \
+    _SmulLo_SW_SL((WebRtc_Word16)(a), (WebRtc_Word16)(b))
 #endif
 
-#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c)      \
-  (WEBRTC_SPL_MUL_16_16(a, b) >> (c))
+#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \
+    (WEBRTC_SPL_MUL_16_16(a, b) >> (c))
 
-#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, c)                   \
-  ((WEBRTC_SPL_MUL_16_16(a, b) + ((WebRtc_Word32) \
+#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, c) \
+    ((WEBRTC_SPL_MUL_16_16(a, b) + ((WebRtc_Word32) \
                                   (((WebRtc_Word32)1) << ((c) - 1)))) >> (c))
-#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_FIXROUND(a, b)\
+#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_FIXROUND(a, b) \
     ((WEBRTC_SPL_MUL_16_16(a, b) + ((WebRtc_Word32) (1 << 14))) >> 15)
 
 // C + the 32 most significant bits of A * B
-#define WEBRTC_SPL_SCALEDIFF32(A, B, C)                                 \
-  (C + (B >> 16) * A + (((WebRtc_UWord32)(0x0000FFFF & B) * A) >> 16))
+#define WEBRTC_SPL_SCALEDIFF32(A, B, C) \
+    (C + (B >> 16) * A + (((WebRtc_UWord32)(0x0000FFFF & B) * A) >> 16))
 
 #define WEBRTC_SPL_ADD_SAT_W32(a, b)    WebRtcSpl_AddSatW32(a, b)
 #define WEBRTC_SPL_SAT(a, b, c)         (b > a ? a : b < c ? c : b)
@@ -146,10 +138,10 @@
 #define WEBRTC_SPL_IS_NEG(a)            ((a) & 0x80000000)
 // Shifting with negative numbers allowed
 // Positive means left shift
-#define WEBRTC_SPL_SHIFT_W16(x, c)              \
-  (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))
-#define WEBRTC_SPL_SHIFT_W32(x, c)              \
-  (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))
+#define WEBRTC_SPL_SHIFT_W16(x, c) \
+    (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))
+#define WEBRTC_SPL_SHIFT_W32(x, c) \
+    (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))
 
 // Shifting with negative numbers not allowed
 // We cannot do casting here due to signed/unsigned problem
@@ -166,44 +158,24 @@
 #define WEBRTC_SPL_VNEW(t, n)           (t *) malloc (sizeof (t) * (n))
 #define WEBRTC_SPL_FREE                 free
 
-#define WEBRTC_SPL_RAND(a)\
-  ((WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT((a), 18816, 7) & 0x00007fff))
+#define WEBRTC_SPL_RAND(a) \
+    ((WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT((a), 18816, 7) & 0x00007fff))
 
 #ifdef __cplusplus
 extern "C"
 {
 #endif
 
-#define WEBRTC_SPL_MEMCPY_W8(v1, v2, length)    \
-  memcpy(v1, v2, (length) * sizeof(char))
-#define WEBRTC_SPL_MEMCPY_W16(v1, v2, length)           \
-  memcpy(v1, v2, (length) * sizeof(WebRtc_Word16))
+#define WEBRTC_SPL_MEMCPY_W8(v1, v2, length) \
+   memcpy(v1, v2, (length) * sizeof(char))
+#define WEBRTC_SPL_MEMCPY_W16(v1, v2, length) \
+   memcpy(v1, v2, (length) * sizeof(WebRtc_Word16))
 
-#define WEBRTC_SPL_MEMMOVE_W16(v1, v2, length)          \
-  memmove(v1, v2, (length) * sizeof(WebRtc_Word16))
+#define WEBRTC_SPL_MEMMOVE_W16(v1, v2, length) \
+   memmove(v1, v2, (length) * sizeof(WebRtc_Word16))
 
-// Trigonometric tables used for quick lookup
-// default declarations
-extern WebRtc_Word16 WebRtcSpl_kCosTable[];
-extern WebRtc_Word16 WebRtcSpl_kSinTable[];
-extern WebRtc_Word16 WebRtcSpl_kSinTable1024[];
-// Hanning table
-extern WebRtc_Word16 WebRtcSpl_kHanningTable[];
-// Random table
-extern WebRtc_Word16 WebRtcSpl_kRandNTable[];
-
-#ifndef WEBRTC_SPL_INLINE_CALLS
-WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 var1, WebRtc_Word16 var2);
-WebRtc_Word16 WebRtcSpl_SubSatW16(WebRtc_Word16 var1, WebRtc_Word16 var2);
-WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 var1, WebRtc_Word32 var2);
-WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 var1, WebRtc_Word32 var2);
-WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 value);
-int WebRtcSpl_NormW32(WebRtc_Word32 value);
-int WebRtcSpl_NormW16(WebRtc_Word16 value);
-int WebRtcSpl_NormU32(WebRtc_UWord32 value);
-#else
+// inline functions:
 #include "spl_inl.h"
-#endif
 
 // Get SPL Version
 WebRtc_Word16 WebRtcSpl_get_version(char* version,
@@ -240,7 +212,7 @@
 
 // Minimum and maximum operations. Implementation in min_max_operations.c.
 // Descriptions at bottom of file.
-WebRtc_Word16 WebRtcSpl_MaxAbsValueW16(G_CONST WebRtc_Word16* vector,
+WebRtc_Word16 WebRtcSpl_MaxAbsValueW16(const WebRtc_Word16* vector,
                                        WebRtc_Word16 length);
 WebRtc_Word32 WebRtcSpl_MaxAbsValueW32(G_CONST WebRtc_Word32* vector,
                                        WebRtc_Word16 length);
@@ -431,14 +403,6 @@
 // FFT operations
 int WebRtcSpl_ComplexFFT(WebRtc_Word16 vector[], int stages, int mode);
 int WebRtcSpl_ComplexIFFT(WebRtc_Word16 vector[], int stages, int mode);
-#if (defined ARM9E_GCC) || (defined ARM_WINM) || (defined ANDROID_AECOPT)
-int WebRtcSpl_ComplexFFT2(WebRtc_Word16 in_vector[],
-                          WebRtc_Word16 out_vector[],
-                          int stages, int mode);
-int WebRtcSpl_ComplexIFFT2(WebRtc_Word16 in_vector[],
-                           WebRtc_Word16 out_vector[],
-                           int stages, int mode);
-#endif
 void WebRtcSpl_ComplexBitReverse(WebRtc_Word16 vector[], int stages);
 // End: FFT operations
 
@@ -1575,43 +1539,6 @@
 //                    value of -1, indicating error.
 //
 
-#if (defined ARM9E_GCC) || (defined ARM_WINM) || (defined ANDROID_AECOPT)
-//
-// WebRtcSpl_ComplexIFFT2(...)
-//
-// Complex or Real inverse FFT, for ARM processor only
-//
-// Computes a 2^|stages|-point FFT on the input vector, which can be or not be
-// in bit-reversed order. If it is bit-reversed, the original content of the
-// vector could be overwritten by the output by setting the first two arguments
-// the same. With X as the input complex vector, y as the output complex vector
-// and with M = 2^|stages|, the following is computed:
-//
-//        M-1
-// y(k) = sum[X(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
-//        i=0
-//
-// The implementations are optimized for speed, not for code size. It uses the
-// decimation-in-time algorithm with radix-2 butterfly technique.
-//
-// Arguments:
-//      - in_vector     : In pointer to complex vector containing 2^|stages|
-//                        real elements interleaved with 2^|stages| imaginary
-//                        elements. [ReImReImReIm....]
-//                        The elements are in Q(-scale) domain.
-//      - out_vector    : Output pointer to vector containing 2^|stages| real
-//                        elements interleaved with 2^|stages| imaginary
-//                        elements. [ReImReImReIm....]
-//                        The output is in the Q0 domain.
-//      - stages        : Number of FFT stages. Must be at least 3 and at most
-//                        10.
-//      - mode          : Dummy input.
-//
-// Return value         : The scale parameter is always 0, except if N>1024,
-//                        which returns a scale value of -1, indicating error.
-//
-#endif
-
 //
 // WebRtcSpl_ComplexFFT(...)
 //
@@ -1657,42 +1584,6 @@
 //                    which returns a scale value of -1, indicating error.
 //
 
-#if (defined ARM9E_GCC) || (defined ARM_WINM) || (defined ANDROID_AECOPT)
-//
-// WebRtcSpl_ComplexFFT2(...)
-//
-// Complex or Real FFT, for ARM processor only
-//
-// Computes a 2^|stages|-point FFT on the input vector, which can be or not be
-// in bit-reversed order. If it is bit-reversed, the original content of the
-// vector could be overwritten by the output by setting the first two arguments
-// the same. With x as the input complex vector, Y as the output complex vector
-// and with M = 2^|stages|, the following is computed:
-//
-//              M-1
-// Y(k) = 1/M * sum[x(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
-//              i=0
-//
-// The implementations are optimized for speed, not for code size. It uses the
-// decimation-in-time algorithm with radix-2 butterfly technique.
-//
-// Arguments:
-//      - in_vector     : In pointer to complex vector containing 2^|stages|
-//                        real elements interleaved with 2^|stages| imaginary
-//                        elements. [ReImReImReIm....]
-//      - out_vector    : Output pointer to vector containing 2^|stages| real
-//                        elements interleaved with 2^|stages| imaginary
-//                        elements. [ReImReImReIm....]
-//                        The output is in the Q0 domain.
-//      - stages        : Number of FFT stages. Must be at least 3 and at most
-//                        10.
-//      - mode          : Dummy input
-//
-// Return value         : The scale parameter is always 0, except if N>1024,
-//                        which returns a scale value of -1, indicating error.
-//
-#endif
-
 //
 // WebRtcSpl_ComplexBitReverse(...)
 //
@@ -1758,6 +1649,30 @@
 //      - out_data      : Super-wideband speech signal, 0-16 kHz
 //
 
+// WebRtc_Word16 WebRtcSpl_SatW32ToW16(...)
+//
+// This function saturates a 32-bit word into a 16-bit word.
+// 
+// Input:
+//      - value32   : The value of a 32-bit word.
+//
+// Output:
+//      - out16     : the saturated 16-bit word.
+//
+
+// int32_t WebRtc_MulAccumW16(...)
+//
+// This function multiplies a 16-bit word by a 16-bit word, and accumulates
+// the product into a 32-bit integer.
+// 
+// Input:
+//      - a    : The value of the first 16-bit word.
+//      - b    : The value of the second 16-bit word.
+//      - c    : The value of a 32-bit integer.
+//
+// Return Value: The value of a * b + c.
+//
+
 // WebRtc_Word16 WebRtcSpl_get_version(...)
 //
 // This function gives the version string of the Signal Processing Library.
diff --git a/src/common_audio/signal_processing/include/spl_inl.h b/src/common_audio/signal_processing/include/spl_inl.h
new file mode 100644
index 0000000..23b3209
--- /dev/null
+++ b/src/common_audio/signal_processing/include/spl_inl.h
@@ -0,0 +1,159 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This header file includes the inline functions in
+// the fixed-point signal processing library.
+
+#ifndef WEBRTC_SPL_SPL_INL_H_
+#define WEBRTC_SPL_SPL_INL_H_
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+#include "spl_inl_armv7.h"
+#else
+
+static __inline WebRtc_Word16 WebRtcSpl_SatW32ToW16(WebRtc_Word32 value32) {
+  WebRtc_Word16 out16 = (WebRtc_Word16) value32;
+
+  if (value32 > 32767)
+    out16 = 32767;
+  else if (value32 < -32768)
+    out16 = -32768;
+
+  return out16;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 a,
+                                                  WebRtc_Word16 b) {
+  return WebRtcSpl_SatW32ToW16((WebRtc_Word32) a + (WebRtc_Word32) b);
+}
+
+static __inline WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 l_var1,
+                                                  WebRtc_Word32 l_var2) {
+  WebRtc_Word32 l_sum;
+
+  // perform long addition
+  l_sum = l_var1 + l_var2;
+
+  // check for under or overflow
+  if (WEBRTC_SPL_IS_NEG(l_var1)) {
+    if (WEBRTC_SPL_IS_NEG(l_var2) && !WEBRTC_SPL_IS_NEG(l_sum)) {
+        l_sum = (WebRtc_Word32)0x80000000;
+    }
+  } else {
+    if (!WEBRTC_SPL_IS_NEG(l_var2) && WEBRTC_SPL_IS_NEG(l_sum)) {
+        l_sum = (WebRtc_Word32)0x7FFFFFFF;
+    }
+  }
+
+  return l_sum;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_SubSatW16(WebRtc_Word16 var1,
+                                                  WebRtc_Word16 var2) {
+  return WebRtcSpl_SatW32ToW16((WebRtc_Word32) var1 - (WebRtc_Word32) var2);
+}
+
+static __inline WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 l_var1,
+                                                  WebRtc_Word32 l_var2) {
+  WebRtc_Word32 l_diff;
+
+  // perform subtraction
+  l_diff = l_var1 - l_var2;
+
+  // check for underflow
+  if ((l_var1 < 0) && (l_var2 > 0) && (l_diff > 0))
+    l_diff = (WebRtc_Word32)0x80000000;
+  // check for overflow
+  if ((l_var1 > 0) && (l_var2 < 0) && (l_diff < 0))
+    l_diff = (WebRtc_Word32)0x7FFFFFFF;
+
+  return l_diff;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 n) {
+  int bits;
+
+  if (0xFFFF0000 & n) {
+    bits = 16;
+  } else {
+    bits = 0;
+  }
+  if (0x0000FF00 & (n >> bits)) bits += 8;
+  if (0x000000F0 & (n >> bits)) bits += 4;
+  if (0x0000000C & (n >> bits)) bits += 2;
+  if (0x00000002 & (n >> bits)) bits += 1;
+  if (0x00000001 & (n >> bits)) bits += 1;
+
+  return bits;
+}
+
+static __inline int WebRtcSpl_NormW32(WebRtc_Word32 a) {
+  int zeros;
+
+  if (a <= 0) a ^= 0xFFFFFFFF;
+
+  if (!(0xFFFF8000 & a)) {
+    zeros = 16;
+  } else {
+    zeros = 0;
+  }
+  if (!(0xFF800000 & (a << zeros))) zeros += 8;
+  if (!(0xF8000000 & (a << zeros))) zeros += 4;
+  if (!(0xE0000000 & (a << zeros))) zeros += 2;
+  if (!(0xC0000000 & (a << zeros))) zeros += 1;
+
+  return zeros;
+}
+
+static __inline int WebRtcSpl_NormU32(WebRtc_UWord32 a) {
+  int zeros;
+
+  if (a == 0) return 0;
+
+  if (!(0xFFFF0000 & a)) {
+    zeros = 16;
+  } else {
+    zeros = 0;
+  }
+  if (!(0xFF000000 & (a << zeros))) zeros += 8;
+  if (!(0xF0000000 & (a << zeros))) zeros += 4;
+  if (!(0xC0000000 & (a << zeros))) zeros += 2;
+  if (!(0x80000000 & (a << zeros))) zeros += 1;
+
+  return zeros;
+}
+
+static __inline int WebRtcSpl_NormW16(WebRtc_Word16 a) {
+  int zeros;
+
+  if (a <= 0) a ^= 0xFFFF;
+
+  if (!(0xFF80 & a)) {
+    zeros = 8;
+  } else {
+    zeros = 0;
+  }
+  if (!(0xF800 & (a << zeros))) zeros += 4;
+  if (!(0xE000 & (a << zeros))) zeros += 2;
+  if (!(0xC000 & (a << zeros))) zeros += 1;
+
+  return zeros;
+}
+
+static __inline int32_t WebRtc_MulAccumW16(int16_t a,
+                                          int16_t b,
+                                          int32_t c) {
+  return (a * b + c);
+}
+
+#endif  // WEBRTC_ARCH_ARM_V7A
+
+#endif  // WEBRTC_SPL_SPL_INL_H_
diff --git a/src/common_audio/signal_processing/include/spl_inl_armv7.h b/src/common_audio/signal_processing/include/spl_inl_armv7.h
new file mode 100644
index 0000000..689c2ba
--- /dev/null
+++ b/src/common_audio/signal_processing/include/spl_inl_armv7.h
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This header file includes the inline functions for ARM processors in
+// the fixed-point signal processing library.
+
+#ifndef WEBRTC_SPL_SPL_INL_ARMV7_H_
+#define WEBRTC_SPL_SPL_INL_ARMV7_H_
+
+static __inline WebRtc_Word32 WEBRTC_SPL_MUL_16_32_RSFT16(WebRtc_Word16 a,
+                                                          WebRtc_Word32 b) {
+  WebRtc_Word32 tmp;
+  __asm__("smulwb %0, %1, %2":"=r"(tmp):"r"(b), "r"(a));
+  return tmp;
+}
+
+static __inline WebRtc_Word32 WEBRTC_SPL_MUL_32_32_RSFT32(WebRtc_Word16 a,
+                                                          WebRtc_Word16 b,
+                                                          WebRtc_Word32 c) {
+  WebRtc_Word32 tmp;
+  __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(tmp) : "r"(b), "r"(a));
+  __asm__("smmul %0, %1, %2":"=r"(tmp):"r"(tmp), "r"(c));
+  return tmp;
+}
+
+static __inline WebRtc_Word32 WEBRTC_SPL_MUL_32_32_RSFT32BI(WebRtc_Word32 a,
+                                                            WebRtc_Word32 b) {
+  WebRtc_Word32 tmp;
+  __asm__("smmul %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
+  return tmp;
+}
+
+static __inline WebRtc_Word32 WEBRTC_SPL_MUL_16_16(WebRtc_Word16 a,
+                                                   WebRtc_Word16 b) {
+  WebRtc_Word32 tmp;
+  __asm__("smulbb %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
+  return tmp;
+}
+
+static __inline int32_t WebRtc_MulAccumW16(int16_t a,
+                                          int16_t b,
+                                          int32_t c) {
+  int32_t tmp = 0;
+  __asm__("smlabb %0, %1, %2, %3":"=r"(tmp):"r"(a), "r"(b), "r"(c));
+  return tmp;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 a,
+                                                  WebRtc_Word16 b) {
+  WebRtc_Word32 s_sum;
+
+  __asm__("qadd16 %0, %1, %2":"=r"(s_sum):"r"(a), "r"(b));
+
+  return (WebRtc_Word16) s_sum;
+}
+
+static __inline WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 l_var1,
+                                                  WebRtc_Word32 l_var2) {
+  WebRtc_Word32 l_sum;
+
+  __asm__("qadd %0, %1, %2":"=r"(l_sum):"r"(l_var1), "r"(l_var2));
+
+  return l_sum;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_SubSatW16(WebRtc_Word16 var1,
+                                                  WebRtc_Word16 var2) {
+  WebRtc_Word32 s_sub;
+
+  __asm__("qsub16 %0, %1, %2":"=r"(s_sub):"r"(var1), "r"(var2));
+
+  return (WebRtc_Word16)s_sub;
+}
+
+static __inline WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 l_var1,
+                                                  WebRtc_Word32 l_var2) {
+  WebRtc_Word32 l_sub;
+
+  __asm__("qsub %0, %1, %2":"=r"(l_sub):"r"(l_var1), "r"(l_var2));
+
+  return l_sub;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 n) {
+  WebRtc_Word32 tmp;
+
+  __asm__("clz %0, %1":"=r"(tmp):"r"(n));
+
+  return (WebRtc_Word16)(32 - tmp);
+}
+
+static __inline int WebRtcSpl_NormW32(WebRtc_Word32 a) {
+  WebRtc_Word32 tmp;
+
+  if (a <= 0) a ^= 0xFFFFFFFF;
+
+  __asm__("clz %0, %1":"=r"(tmp):"r"(a));
+
+  return tmp - 1;
+}
+
+static __inline int WebRtcSpl_NormU32(WebRtc_UWord32 a) {
+  int tmp;
+
+  if (a == 0) return 0;
+
+  __asm__("clz %0, %1":"=r"(tmp):"r"(a));
+
+  return tmp;
+}
+
+static __inline int WebRtcSpl_NormW16(WebRtc_Word16 a) {
+  WebRtc_Word32 tmp;
+
+  if (a <= 0) a ^= 0xFFFFFFFF;
+
+  __asm__("clz %0, %1":"=r"(tmp):"r"(a));
+
+  return tmp - 17;
+}
+
+static __inline WebRtc_Word16 WebRtcSpl_SatW32ToW16(WebRtc_Word32 value32) {
+  WebRtc_Word16 out16;
+
+  __asm__("ssat %r0, #16, %r1" : "=r"(out16) : "r"(value32));
+
+  return out16;
+}
+#endif  // WEBRTC_SPL_SPL_INL_ARMV7_H_
diff --git a/src/common_audio/signal_processing_library/main/source/levinson_durbin.c b/src/common_audio/signal_processing/levinson_durbin.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/levinson_durbin.c
rename to src/common_audio/signal_processing/levinson_durbin.c
diff --git a/src/common_audio/signal_processing_library/main/source/lpc_to_refl_coef.c b/src/common_audio/signal_processing/lpc_to_refl_coef.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/lpc_to_refl_coef.c
rename to src/common_audio/signal_processing/lpc_to_refl_coef.c
diff --git a/src/common_audio/signal_processing_library/main/source/min_max_operations.c b/src/common_audio/signal_processing/min_max_operations.c
similarity index 85%
rename from src/common_audio/signal_processing_library/main/source/min_max_operations.c
rename to src/common_audio/signal_processing/min_max_operations.c
index cf5e9a7..57eaff7 100644
--- a/src/common_audio/signal_processing_library/main/source/min_max_operations.c
+++ b/src/common_audio/signal_processing/min_max_operations.c
@@ -28,8 +28,10 @@
 
 #include "signal_processing_library.h"
 
+#if !(defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM_NEON))
+
 // Maximum absolute value of word16 vector.
-WebRtc_Word16 WebRtcSpl_MaxAbsValueW16(G_CONST WebRtc_Word16 *vector, WebRtc_Word16 length)
+WebRtc_Word16 WebRtcSpl_MaxAbsValueW16(const WebRtc_Word16 *vector, WebRtc_Word16 length)
 {
     WebRtc_Word32 tempMax = 0;
     WebRtc_Word32 absVal;
@@ -37,49 +39,6 @@
     int i;
     G_CONST WebRtc_Word16 *tmpvector = vector;
 
-#ifdef _ARM_OPT_
-#pragma message("NOTE: _ARM_OPT_ optimizations are used")
-
-    WebRtc_Word16 len4 = (length >> 2) << 2;
-
-    for (i = 0; i < len4; i = i + 4)
-    {
-        absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
-        if (absVal > tempMax)
-        {
-            tempMax = absVal;
-        }
-        tmpvector++;
-        absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
-        if (absVal > tempMax)
-        {
-            tempMax = absVal;
-        }
-        tmpvector++;
-        absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
-        if (absVal > tempMax)
-        {
-            tempMax = absVal;
-        }
-        tmpvector++;
-        absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
-        if (absVal > tempMax)
-        {
-            tempMax = absVal;
-        }
-        tmpvector++;
-    }
-
-    for (i = len4; i < len; i++)
-    {
-        absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
-        if (absVal > tempMax)
-        {
-            tempMax = absVal;
-        }
-        tmpvector++;
-    }
-#else
     for (i = 0; i < length; i++)
     {
         absVal = WEBRTC_SPL_ABS_W32((*tmpvector));
@@ -91,9 +50,10 @@
     }
     totMax = (WebRtc_Word16)WEBRTC_SPL_MIN(tempMax, WEBRTC_SPL_WORD16_MAX);
     return totMax;
-#endif
 }
 
+#endif
+
 // Index of maximum absolute value in a  word16 vector.
 WebRtc_Word16 WebRtcSpl_MaxAbsIndexW16(G_CONST WebRtc_Word16* vector, WebRtc_Word16 length)
 {
diff --git a/src/common_audio/signal_processing/min_max_operations_neon.c b/src/common_audio/signal_processing/min_max_operations_neon.c
new file mode 100644
index 0000000..158bcc1
--- /dev/null
+++ b/src/common_audio/signal_processing/min_max_operations_neon.c
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if (defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM_NEON))
+
+#include <arm_neon.h>
+
+#include "signal_processing_library.h"
+
+// Maximum absolute value of word16 vector.
+WebRtc_Word16 WebRtcSpl_MaxAbsValueW16(const WebRtc_Word16* vector,
+                                       WebRtc_Word16 length) {
+  WebRtc_Word32 temp_max = 0;
+  WebRtc_Word32 abs_val;
+  WebRtc_Word16 tot_max;
+  int i;
+
+  __asm__("vmov.i16 d25, #0" : : : "d25");
+
+  for (i = 0; i < length - 7; i += 8) {
+    __asm__("vld1.16 {d26, d27}, [%0]" : : "r"(&vector[i]) : "q13");
+    __asm__("vabs.s16 q13, q13" : : : "q13");
+    __asm__("vpmax.s16 d26, d27" : : : "q13");
+    __asm__("vpmax.s16 d25, d26" : : : "d25", "d26");
+  }
+  __asm__("vpmax.s16 d25, d25" : : : "d25");
+  __asm__("vpmax.s16 d25, d25" : : : "d25");
+  __asm__("vmov.s16 %0, d25[0]" : "=r"(temp_max): : "d25");
+
+  for (; i < length; i++) {
+    abs_val = WEBRTC_SPL_ABS_W32((vector[i]));
+    if (abs_val > temp_max) {
+      temp_max = abs_val;
+    }
+  }
+  tot_max = (WebRtc_Word16)WEBRTC_SPL_MIN(temp_max, WEBRTC_SPL_WORD16_MAX);
+  return tot_max;
+}
+
+#endif
diff --git a/src/common_audio/signal_processing_library/main/source/randn_table.c b/src/common_audio/signal_processing/randomization_functions.c
similarity index 83%
rename from src/common_audio/signal_processing_library/main/source/randn_table.c
rename to src/common_audio/signal_processing/randomization_functions.c
index 734fa79..04271ad 100644
--- a/src/common_audio/signal_processing_library/main/source/randn_table.c
+++ b/src/common_audio/signal_processing/randomization_functions.c
@@ -10,14 +10,19 @@
 
 
 /*
- * Table with 512 samples from a normal distribution with mean 1 and std 1
- * The values are shifted up 13 steps (multiplied by 8192)
+ * This file contains implementations of the randomization functions
+ * WebRtcSpl_IncreaseSeed()
+ * WebRtcSpl_RandU()
+ * WebRtcSpl_RandN()
+ * WebRtcSpl_RandUArray()
+ *
+ * The description header can be found in signal_processing_library.h
+ *
  */
 
 #include "signal_processing_library.h"
 
-WebRtc_Word16 WebRtcSpl_kRandNTable[] =
-{
+static const WebRtc_Word16 kRandNTable[] = {
     9178,    -7260,       40,    10189,     4894,    -3531,   -13779,    14764,
    -4008,    -8884,    -8990,     1008,     7368,     5184,     3251,    -5817,
    -9786,     5963,     1770,     8066,    -7135,    10772,    -2298,     1361,
@@ -83,3 +88,32 @@
     2406,     7703,     -951,    11196,     -564,     3406,     2217,     4806,
     2374,    -5797,    11839,     8940,   -11874,    18213,     2855,    10492
 };
+
+WebRtc_UWord32 WebRtcSpl_IncreaseSeed(WebRtc_UWord32 *seed)
+{
+    seed[0] = (seed[0] * ((WebRtc_Word32)69069) + 1) & (WEBRTC_SPL_MAX_SEED_USED - 1);
+    return seed[0];
+}
+
+WebRtc_Word16 WebRtcSpl_RandU(WebRtc_UWord32 *seed)
+{
+    return (WebRtc_Word16)(WebRtcSpl_IncreaseSeed(seed) >> 16);
+}
+
+WebRtc_Word16 WebRtcSpl_RandN(WebRtc_UWord32 *seed)
+{
+    return kRandNTable[WebRtcSpl_IncreaseSeed(seed) >> 23];
+}
+
+// Creates an array of uniformly distributed variables
+WebRtc_Word16 WebRtcSpl_RandUArray(WebRtc_Word16* vector,
+                                   WebRtc_Word16 vector_length,
+                                   WebRtc_UWord32* seed)
+{
+    int i;
+    for (i = 0; i < vector_length; i++)
+    {
+        vector[i] = WebRtcSpl_RandU(seed);
+    }
+    return vector_length;
+}
diff --git a/src/common_audio/signal_processing_library/main/source/refl_coef_to_lpc.c b/src/common_audio/signal_processing/refl_coef_to_lpc.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/refl_coef_to_lpc.c
rename to src/common_audio/signal_processing/refl_coef_to_lpc.c
diff --git a/src/common_audio/signal_processing_library/main/source/resample.c b/src/common_audio/signal_processing/resample.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/resample.c
rename to src/common_audio/signal_processing/resample.c
diff --git a/src/common_audio/signal_processing_library/main/source/resample_48khz.c b/src/common_audio/signal_processing/resample_48khz.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/resample_48khz.c
rename to src/common_audio/signal_processing/resample_48khz.c
diff --git a/src/common_audio/signal_processing/resample_by_2.c b/src/common_audio/signal_processing/resample_by_2.c
new file mode 100644
index 0000000..e239db7
--- /dev/null
+++ b/src/common_audio/signal_processing/resample_by_2.c
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the resampling by two functions.
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "signal_processing_library.h"
+
+#ifdef WEBRTC_ARCH_ARM_V7A
+
+// allpass filter coefficients.
+static const WebRtc_UWord32 kResampleAllpass1[3] = {3284, 24441, 49528 << 15};
+static const WebRtc_UWord32 kResampleAllpass2[3] =
+  {12199, 37471 << 15, 60255 << 15};
+
+// Multiply two 32-bit values and accumulate to another input value.
+// Return: state + ((diff * tbl_value) >> 16)
+
+static __inline WebRtc_Word32 MUL_ACCUM_1(WebRtc_Word32 tbl_value,
+                                          WebRtc_Word32 diff,
+                                          WebRtc_Word32 state) {
+  WebRtc_Word32 result;
+  __asm__("smlawb %r0, %r1, %r2, %r3": "=r"(result): "r"(diff),
+                                       "r"(tbl_value), "r"(state));
+  return result;
+}
+
+// Multiply two 32-bit values and accumulate to another input value.
+// Return: state + (((diff << 1) * tbl_value) >> 32)
+//
+// The reason to introduce this function is that, in case we can't use the
+// smlawb instruction (in MUL_ACCUM_1) due to input value range, we can still
+// use smmla to save some cycles.
+
+static __inline WebRtc_Word32 MUL_ACCUM_2(WebRtc_Word32 tbl_value,
+                                          WebRtc_Word32 diff,
+                                          WebRtc_Word32 state) {
+  WebRtc_Word32 result;
+  __asm__("smmla %r0, %r1, %r2, %r3": "=r"(result): "r"(diff << 1),
+                                      "r"(tbl_value), "r"(state));
+  return result;
+}
+
+#else
+
+// allpass filter coefficients.
+static const WebRtc_UWord16 kResampleAllpass1[3] = {3284, 24441, 49528};
+static const WebRtc_UWord16 kResampleAllpass2[3] = {12199, 37471, 60255};
+
+// Multiply a 32-bit value with a 16-bit value and accumulate to another input:
+#define MUL_ACCUM_1(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+#define MUL_ACCUM_2(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+
+#endif  // WEBRTC_ARCH_ARM_V7A
+
+
+// decimator
+void WebRtcSpl_DownsampleBy2(const WebRtc_Word16* in, const WebRtc_Word16 len,
+                             WebRtc_Word16* out, WebRtc_Word32* filtState) {
+  WebRtc_Word32 tmp1, tmp2, diff, in32, out32;
+  WebRtc_Word16 i;
+
+  register WebRtc_Word32 state0 = filtState[0];
+  register WebRtc_Word32 state1 = filtState[1];
+  register WebRtc_Word32 state2 = filtState[2];
+  register WebRtc_Word32 state3 = filtState[3];
+  register WebRtc_Word32 state4 = filtState[4];
+  register WebRtc_Word32 state5 = filtState[5];
+  register WebRtc_Word32 state6 = filtState[6];
+  register WebRtc_Word32 state7 = filtState[7];
+
+  for (i = (len >> 1); i > 0; i--) {
+    // lower allpass filter
+    in32 = (WebRtc_Word32)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (WebRtc_Word32)(*in++) << 10;
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+
+  filtState[0] = state0;
+  filtState[1] = state1;
+  filtState[2] = state2;
+  filtState[3] = state3;
+  filtState[4] = state4;
+  filtState[5] = state5;
+  filtState[6] = state6;
+  filtState[7] = state7;
+}
+
+
+void WebRtcSpl_UpsampleBy2(const WebRtc_Word16* in, WebRtc_Word16 len,
+                           WebRtc_Word16* out, WebRtc_Word32* filtState) {
+  WebRtc_Word32 tmp1, tmp2, diff, in32, out32;
+  WebRtc_Word16 i;
+
+  register WebRtc_Word32 state0 = filtState[0];
+  register WebRtc_Word32 state1 = filtState[1];
+  register WebRtc_Word32 state2 = filtState[2];
+  register WebRtc_Word32 state3 = filtState[3];
+  register WebRtc_Word32 state4 = filtState[4];
+  register WebRtc_Word32 state5 = filtState[5];
+  register WebRtc_Word32 state6 = filtState[6];
+  register WebRtc_Word32 state7 = filtState[7];
+
+  for (i = len; i > 0; i--) {
+    // lower allpass filter
+    in32 = (WebRtc_Word32)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state2);
+    state2 = tmp2;
+
+    // round; limit amplitude to prevent wrap-around; write to output array
+    out32 = (state3 + 512) >> 10;
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+
+    // upper allpass filter
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state6);
+    state6 = tmp2;
+
+    // round; limit amplitude to prevent wrap-around; write to output array
+    out32 = (state7 + 512) >> 10;
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+
+  filtState[0] = state0;
+  filtState[1] = state1;
+  filtState[2] = state2;
+  filtState[3] = state3;
+  filtState[4] = state4;
+  filtState[5] = state5;
+  filtState[6] = state6;
+  filtState[7] = state7;
+}
diff --git a/src/common_audio/signal_processing_library/main/source/resample_by_2_internal.c b/src/common_audio/signal_processing/resample_by_2_internal.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/resample_by_2_internal.c
rename to src/common_audio/signal_processing/resample_by_2_internal.c
diff --git a/src/common_audio/signal_processing_library/main/source/resample_by_2_internal.h b/src/common_audio/signal_processing/resample_by_2_internal.h
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/resample_by_2_internal.h
rename to src/common_audio/signal_processing/resample_by_2_internal.h
diff --git a/src/common_audio/signal_processing_library/main/source/resample_fractional.c b/src/common_audio/signal_processing/resample_fractional.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/resample_fractional.c
rename to src/common_audio/signal_processing/resample_fractional.c
diff --git a/src/common_audio/signal_processing_library/main/source/spl.gyp b/src/common_audio/signal_processing/signal_processing.gypi
similarity index 69%
rename from src/common_audio/signal_processing_library/main/source/spl.gyp
rename to src/common_audio/signal_processing/signal_processing.gypi
index 9c052fc..c67bf7c 100644
--- a/src/common_audio/signal_processing_library/main/source/spl.gyp
+++ b/src/common_audio/signal_processing/signal_processing.gypi
@@ -7,33 +7,26 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../common_settings.gypi', # Common settings
-  ],
   'targets': [
     {
-      'target_name': 'spl',
+      'target_name': 'signal_processing',
       'type': '<(library)',
       'include_dirs': [
-        '../interface',
+        'include',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'include',
         ],
       },
       'sources': [
-        '../interface/signal_processing_library.h',
-        '../interface/spl_inl.h',
-        'add_sat_w16.c',
-        'add_sat_w32.c',
+        'include/signal_processing_library.h',
+        'include/spl_inl.h',
         'auto_corr_to_refl_coef.c',
         'auto_correlation.c',
         'complex_fft.c',
-        'complex_ifft.c',
         'complex_bit_reverse.c',
         'copy_set_operations.c',
-        'cos_table.c',
         'cross_correlation.c',
         'division_operations.c',
         'dot_product_with_scale.c',
@@ -44,16 +37,10 @@
         'filter_ma_fast_q12.c',
         'get_hanning_window.c',
         'get_scaling_square.c',
-        'get_size_in_bits.c',
-        'hanning_table.c',
         'ilbc_specific_functions.c',
         'levinson_durbin.c',
         'lpc_to_refl_coef.c',
         'min_max_operations.c',
-        'norm_u32.c',
-        'norm_w16.c',
-        'norm_w32.c',
-        'randn_table.c',
         'randomization_functions.c',
         'refl_coef_to_lpc.c',
         'resample.c',
@@ -62,19 +49,33 @@
         'resample_by_2_internal.c',
         'resample_by_2_internal.h',
         'resample_fractional.c',
-        'sin_table.c',
-        'sin_table_1024.c',
         'spl_sqrt.c',
         'spl_sqrt_floor.c',
         'spl_version.c',
         'splitting_filter.c',
         'sqrt_of_one_minus_x_squared.c',
-        'sub_sat_w16.c',
-        'sub_sat_w32.c',
         'vector_scaling_operations.c',
       ],
-    },
-  ],
+    }, # spl
+  ], # targets
+  'conditions': [
+    ['build_with_chromium==0', {
+      'targets': [
+        {
+          'target_name': 'signal_processing_unittests',
+          'type': 'executable',
+          'dependencies': [
+            'signal_processing',
+            '<(webrtc_root)/../test/test.gyp:test_support_main',
+            '<(webrtc_root)/../testing/gtest.gyp:gtest',
+          ],
+          'sources': [
+            'signal_processing_unittest.cc',
+          ],
+        }, # spl_unittests
+      ], # targets
+    }], # build_with_chromium
+  ], # conditions
 }
 
 # Local Variables:
diff --git a/src/common_audio/signal_processing/signal_processing_unittest.cc b/src/common_audio/signal_processing/signal_processing_unittest.cc
new file mode 100644
index 0000000..b2e8281
--- /dev/null
+++ b/src/common_audio/signal_processing/signal_processing_unittest.cc
@@ -0,0 +1,448 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "signal_processing_library.h"
+#include "gtest/gtest.h"
+
+class SplTest : public testing::Test {
+ protected:
+  virtual ~SplTest() {
+  }
+  void SetUp() {
+  }
+  void TearDown() {
+  }
+};
+
+TEST_F(SplTest, MacroTest) {
+    // Macros with inputs.
+    int A = 10;
+    int B = 21;
+    int a = -3;
+    int b = WEBRTC_SPL_WORD32_MAX;
+    int nr = 2;
+    int d_ptr2 = 0;
+
+    EXPECT_EQ(10, WEBRTC_SPL_MIN(A, B));
+    EXPECT_EQ(21, WEBRTC_SPL_MAX(A, B));
+
+    EXPECT_EQ(3, WEBRTC_SPL_ABS_W16(a));
+    EXPECT_EQ(3, WEBRTC_SPL_ABS_W32(a));
+    EXPECT_EQ(0, WEBRTC_SPL_GET_BYTE(&B, nr));
+    WEBRTC_SPL_SET_BYTE(&d_ptr2, 1, nr);
+    EXPECT_EQ(65536, d_ptr2);
+
+    EXPECT_EQ(-63, WEBRTC_SPL_MUL(a, B));
+    EXPECT_EQ(-2147483645, WEBRTC_SPL_MUL(a, b));
+    EXPECT_EQ(-2147483645u, WEBRTC_SPL_UMUL(a, b));
+    b = WEBRTC_SPL_WORD16_MAX >> 1;
+    EXPECT_EQ(65535u, WEBRTC_SPL_UMUL_RSFT16(a, b));
+    EXPECT_EQ(1073627139u, WEBRTC_SPL_UMUL_16_16(a, b));
+    EXPECT_EQ(16382u, WEBRTC_SPL_UMUL_16_16_RSFT16(a, b));
+    EXPECT_EQ(-49149u, WEBRTC_SPL_UMUL_32_16(a, b));
+    EXPECT_EQ(65535u, WEBRTC_SPL_UMUL_32_16_RSFT16(a, b));
+    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_U16(a, b));
+
+    a = b;
+    b = -3;
+    EXPECT_EQ(-5461, WEBRTC_SPL_DIV(a, b));
+    EXPECT_EQ(0u, WEBRTC_SPL_UDIV(a, b));
+
+    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT16(a, b));
+    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT15(a, b));
+    EXPECT_EQ(-3, WEBRTC_SPL_MUL_16_32_RSFT14(a, b));
+    EXPECT_EQ(-24, WEBRTC_SPL_MUL_16_32_RSFT11(a, b));
+
+    int a32 = WEBRTC_SPL_WORD32_MAX;
+    int a32a = (WEBRTC_SPL_WORD32_MAX >> 16);
+    int a32b = (WEBRTC_SPL_WORD32_MAX & 0x0000ffff);
+    EXPECT_EQ(5, WEBRTC_SPL_MUL_32_32_RSFT32(a32a, a32b, A));
+    EXPECT_EQ(5, WEBRTC_SPL_MUL_32_32_RSFT32BI(a32, A));
+
+    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_16(a, b));
+    EXPECT_EQ(-12288, WEBRTC_SPL_MUL_16_16_RSFT(a, b, 2));
+
+    EXPECT_EQ(-12287, WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, 2));
+    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_16_RSFT_WITH_FIXROUND(a, b));
+
+    EXPECT_EQ(16380, WEBRTC_SPL_ADD_SAT_W32(a, b));
+    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, A, B));
+    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, B, A));
+    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_32_16(a, b));
+
+    EXPECT_EQ(16386, WEBRTC_SPL_SUB_SAT_W32(a, b));
+    EXPECT_EQ(16380, WEBRTC_SPL_ADD_SAT_W16(a, b));
+    EXPECT_EQ(16386, WEBRTC_SPL_SUB_SAT_W16(a, b));
+
+    EXPECT_TRUE(WEBRTC_SPL_IS_NEG(b));
+
+    // Shifting with negative numbers allowed
+    int shift_amount = 1;  // Workaround compiler warning using variable here.
+    // Positive means left shift
+    EXPECT_EQ(32766, WEBRTC_SPL_SHIFT_W16(a, shift_amount));
+    EXPECT_EQ(32766, WEBRTC_SPL_SHIFT_W32(a, shift_amount));
+
+    // Shifting with negative numbers not allowed
+    // We cannot do casting here due to signed/unsigned problem
+    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_W16(a, 1));
+    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_W16(a, 1));
+    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_W32(a, 1));
+    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_W32(a, 1));
+
+    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_U16(a, 1));
+    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_U16(a, 1));
+    EXPECT_EQ(8191u, WEBRTC_SPL_RSHIFT_U32(a, 1));
+    EXPECT_EQ(32766u, WEBRTC_SPL_LSHIFT_U32(a, 1));
+
+    EXPECT_EQ(1470, WEBRTC_SPL_RAND(A));
+}
+
+TEST_F(SplTest, InlineTest) {
+    WebRtc_Word16 a = 121;
+    WebRtc_Word16 b = -17;
+    WebRtc_Word32 A = 111121;
+    WebRtc_Word32 B = -1711;
+    char bVersion[8];
+
+    EXPECT_EQ(104, WebRtcSpl_AddSatW16(a, b));
+    EXPECT_EQ(138, WebRtcSpl_SubSatW16(a, b));
+
+    EXPECT_EQ(109410, WebRtcSpl_AddSatW32(A, B));
+    EXPECT_EQ(112832, WebRtcSpl_SubSatW32(A, B));
+
+    EXPECT_EQ(17, WebRtcSpl_GetSizeInBits(A));
+    EXPECT_EQ(14, WebRtcSpl_NormW32(A));
+    EXPECT_EQ(4, WebRtcSpl_NormW16(B));
+    EXPECT_EQ(15, WebRtcSpl_NormU32(A));
+
+    EXPECT_EQ(0, WebRtcSpl_get_version(bVersion, 8));
+}
+
+TEST_F(SplTest, MathOperationsTest) {
+    int A = 117;
+    WebRtc_Word32 num = 117;
+    WebRtc_Word32 den = -5;
+    WebRtc_UWord16 denU = 5;
+    EXPECT_EQ(10, WebRtcSpl_Sqrt(A));
+    EXPECT_EQ(10, WebRtcSpl_SqrtFloor(A));
+
+
+    EXPECT_EQ(-91772805, WebRtcSpl_DivResultInQ31(den, num));
+    EXPECT_EQ(-23, WebRtcSpl_DivW32W16ResW16(num, (WebRtc_Word16)den));
+    EXPECT_EQ(-23, WebRtcSpl_DivW32W16(num, (WebRtc_Word16)den));
+    EXPECT_EQ(23u, WebRtcSpl_DivU32U16(num, denU));
+    EXPECT_EQ(0, WebRtcSpl_DivW32HiLow(128, 0, 256));
+}
+
+TEST_F(SplTest, BasicArrayOperationsTest) {
+    const int kVectorSize = 4;
+    int B[] = {4, 12, 133, 1100};
+    WebRtc_UWord8 b8[kVectorSize];
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word32 b32[kVectorSize];
+
+    WebRtc_UWord8 bTmp8[kVectorSize];
+    WebRtc_Word16 bTmp16[kVectorSize];
+    WebRtc_Word32 bTmp32[kVectorSize];
+
+    WebRtcSpl_MemSetW16(b16, 3, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(3, b16[kk]);
+    }
+    EXPECT_EQ(kVectorSize, WebRtcSpl_ZerosArrayW16(b16, kVectorSize));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(0, b16[kk]);
+    }
+    EXPECT_EQ(kVectorSize, WebRtcSpl_OnesArrayW16(b16, kVectorSize));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(1, b16[kk]);
+    }
+    WebRtcSpl_MemSetW32(b32, 3, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(3, b32[kk]);
+    }
+    EXPECT_EQ(kVectorSize, WebRtcSpl_ZerosArrayW32(b32, kVectorSize));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(0, b32[kk]);
+    }
+    EXPECT_EQ(kVectorSize, WebRtcSpl_OnesArrayW32(b32, kVectorSize));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(1, b32[kk]);
+    }
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        bTmp8[kk] = (WebRtc_Word8)kk;
+        bTmp16[kk] = (WebRtc_Word16)kk;
+        bTmp32[kk] = (WebRtc_Word32)kk;
+    }
+    WEBRTC_SPL_MEMCPY_W8(b8, bTmp8, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(b8[kk], bTmp8[kk]);
+    }
+    WEBRTC_SPL_MEMCPY_W16(b16, bTmp16, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(b16[kk], bTmp16[kk]);
+    }
+//    WEBRTC_SPL_MEMCPY_W32(b32, bTmp32, kVectorSize);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(b32[kk], bTmp32[kk]);
+//    }
+    EXPECT_EQ(2, WebRtcSpl_CopyFromEndW16(b16, kVectorSize, 2, bTmp16));
+    for (int kk = 0; kk < 2; ++kk) {
+        EXPECT_EQ(kk+2, bTmp16[kk]);
+    }
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b32[kk] = B[kk];
+        b16[kk] = (WebRtc_Word16)B[kk];
+    }
+    WebRtcSpl_VectorBitShiftW32ToW16(bTmp16, kVectorSize, b32, 1);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
+    }
+    WebRtcSpl_VectorBitShiftW16(bTmp16, kVectorSize, b16, 1);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
+    }
+    WebRtcSpl_VectorBitShiftW32(bTmp32, kVectorSize, b32, 1);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp32[kk]);
+    }
+
+    WebRtcSpl_MemCpyReversedOrder(&bTmp16[3], b16, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(b16[3-kk], bTmp16[kk]);
+    }
+}
+
+TEST_F(SplTest, MinMaxOperationsTest) {
+    const int kVectorSize = 4;
+    int B[] = {4, 12, 133, -1100};
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word32 b32[kVectorSize];
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = B[kk];
+        b32[kk] = B[kk];
+    }
+
+    EXPECT_EQ(1100, WebRtcSpl_MaxAbsValueW16(b16, kVectorSize));
+    EXPECT_EQ(1100, WebRtcSpl_MaxAbsValueW32(b32, kVectorSize));
+    EXPECT_EQ(133, WebRtcSpl_MaxValueW16(b16, kVectorSize));
+    EXPECT_EQ(133, WebRtcSpl_MaxValueW32(b32, kVectorSize));
+    EXPECT_EQ(3, WebRtcSpl_MaxAbsIndexW16(b16, kVectorSize));
+    EXPECT_EQ(2, WebRtcSpl_MaxIndexW16(b16, kVectorSize));
+    EXPECT_EQ(2, WebRtcSpl_MaxIndexW32(b32, kVectorSize));
+
+    EXPECT_EQ(-1100, WebRtcSpl_MinValueW16(b16, kVectorSize));
+    EXPECT_EQ(-1100, WebRtcSpl_MinValueW32(b32, kVectorSize));
+    EXPECT_EQ(3, WebRtcSpl_MinIndexW16(b16, kVectorSize));
+    EXPECT_EQ(3, WebRtcSpl_MinIndexW32(b32, kVectorSize));
+
+    EXPECT_EQ(0, WebRtcSpl_GetScalingSquare(b16, kVectorSize, 1));
+}
+
+TEST_F(SplTest, VectorOperationsTest) {
+    const int kVectorSize = 4;
+    int B[] = {4, 12, 133, 1100};
+    WebRtc_Word16 a16[kVectorSize];
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word32 b32[kVectorSize];
+    WebRtc_Word16 bTmp16[kVectorSize];
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        a16[kk] = B[kk];
+        b16[kk] = B[kk];
+    }
+
+    WebRtcSpl_AffineTransformVector(bTmp16, b16, 3, 7, 2, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]*3+7)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]*3+B[kk]*2+2)>>2, bTmp16[kk]);
+    }
+
+    WebRtcSpl_AddAffineVectorToVector(bTmp16, b16, 3, 7, 2, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(((B[kk]*3+B[kk]*2+2)>>2)+((b16[kk]*3+7)>>2), bTmp16[kk]);
+    }
+
+    WebRtcSpl_CrossCorrelation(b32, b16, bTmp16, kVectorSize, 2, 2, 0);
+    for (int kk = 0; kk < 2; ++kk) {
+        EXPECT_EQ(614236, b32[kk]);
+    }
+//    EXPECT_EQ(, WebRtcSpl_DotProduct(b16, bTmp16, 4));
+    EXPECT_EQ(306962, WebRtcSpl_DotProductWithScale(b16, b16, kVectorSize, 2));
+
+    WebRtcSpl_ScaleVector(b16, bTmp16, 13, kVectorSize, 2);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleVectorWithSat(b16, bTmp16, 13, kVectorSize, 2);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleAndAddVectors(a16, 13, 2, b16, 7, 2, bTmp16, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(((a16[kk]*13)>>2)+((b16[kk]*7)>>2), bTmp16[kk]);
+    }
+
+    WebRtcSpl_AddVectorsAndShift(bTmp16, a16, b16, kVectorSize, 2);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(B[kk] >> 1, bTmp16[kk]);
+    }
+    WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3], kVectorSize, 2);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((a16[kk]*b16[3-kk])>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ElementwiseVectorMult(bTmp16, a16, b16, kVectorSize, 6);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((a16[kk]*b16[kk])>>6, bTmp16[kk]);
+    }
+
+    WebRtcSpl_SqrtOfOneMinusXSquared(b16, kVectorSize, bTmp16);
+    for (int kk = 0; kk < kVectorSize - 1; ++kk) {
+        EXPECT_EQ(32767, bTmp16[kk]);
+    }
+    EXPECT_EQ(32749, bTmp16[kVectorSize - 1]);
+}
+
+TEST_F(SplTest, EstimatorsTest) {
+    const int kVectorSize = 4;
+    int B[] = {4, 12, 133, 1100};
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word32 b32[kVectorSize];
+    WebRtc_Word16 bTmp16[kVectorSize];
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = B[kk];
+        b32[kk] = B[kk];
+    }
+
+    EXPECT_EQ(0, WebRtcSpl_LevinsonDurbin(b32, b16, bTmp16, 2));
+}
+
+TEST_F(SplTest, FilterTest) {
+    const int kVectorSize = 4;
+    WebRtc_Word16 A[] = {1, 2, 33, 100};
+    WebRtc_Word16 A5[] = {1, 2, 33, 100, -5};
+    WebRtc_Word16 B[] = {4, 12, 133, 110};
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word16 bTmp16[kVectorSize];
+    WebRtc_Word16 bTmp16Low[kVectorSize];
+    WebRtc_Word16 bState[kVectorSize];
+    WebRtc_Word16 bStateLow[kVectorSize];
+
+    WebRtcSpl_ZerosArrayW16(bState, kVectorSize);
+    WebRtcSpl_ZerosArrayW16(bStateLow, kVectorSize);
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = A[kk];
+    }
+
+    // MA filters
+    WebRtcSpl_FilterMAFastQ12(b16, bTmp16, B, kVectorSize, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        //EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+    }
+    // AR filters
+    WebRtcSpl_FilterARFastQ12(b16, bTmp16, A, kVectorSize, kVectorSize);
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+    }
+    EXPECT_EQ(kVectorSize, WebRtcSpl_FilterAR(A5,
+                                              5,
+                                              b16,
+                                              kVectorSize,
+                                              bState,
+                                              kVectorSize,
+                                              bStateLow,
+                                              kVectorSize,
+                                              bTmp16,
+                                              bTmp16Low,
+                                              kVectorSize));
+}
+
+TEST_F(SplTest, RandTest) {
+    const int kVectorSize = 4;
+    WebRtc_Word16 BU[] = {3653, 12446, 8525, 30691};
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_UWord32 bSeed = 100000;
+
+    EXPECT_EQ(464449057u, WebRtcSpl_IncreaseSeed(&bSeed));
+    EXPECT_EQ(31565, WebRtcSpl_RandU(&bSeed));
+    EXPECT_EQ(-9786, WebRtcSpl_RandN(&bSeed));
+    EXPECT_EQ(kVectorSize, WebRtcSpl_RandUArray(b16, kVectorSize, &bSeed));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(BU[kk], b16[kk]);
+    }
+}
+
+TEST_F(SplTest, SignalProcessingTest) {
+    const int kVectorSize = 4;
+    int A[] = {1, 2, 33, 100};
+    WebRtc_Word16 b16[kVectorSize];
+    WebRtc_Word32 b32[kVectorSize];
+
+    WebRtc_Word16 bTmp16[kVectorSize];
+    WebRtc_Word32 bTmp32[kVectorSize];
+
+    int bScale = 0;
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = A[kk];
+        b32[kk] = A[kk];
+    }
+
+    EXPECT_EQ(2, WebRtcSpl_AutoCorrelation(b16, kVectorSize, 1, bTmp32, &bScale));
+    WebRtcSpl_ReflCoefToLpc(b16, kVectorSize, bTmp16);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+//    }
+    WebRtcSpl_LpcToReflCoef(bTmp16, kVectorSize, b16);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(a16[kk], b16[kk]);
+//    }
+    WebRtcSpl_AutoCorrToReflCoef(b32, kVectorSize, bTmp16);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+//    }
+    WebRtcSpl_GetHanningWindow(bTmp16, kVectorSize);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+//    }
+
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = A[kk];
+    }
+    EXPECT_EQ(11094 , WebRtcSpl_Energy(b16, kVectorSize, &bScale));
+    EXPECT_EQ(0, bScale);
+}
+
+TEST_F(SplTest, FFTTest) {
+    WebRtc_Word16 B[] = {1, 2, 33, 100,
+            2, 3, 34, 101,
+            3, 4, 35, 102,
+            4, 5, 36, 103};
+
+    EXPECT_EQ(0, WebRtcSpl_ComplexFFT(B, 3, 1));
+//    for (int kk = 0; kk < 16; ++kk) {
+//        EXPECT_EQ(A[kk], B[kk]);
+//    }
+    EXPECT_EQ(0, WebRtcSpl_ComplexIFFT(B, 3, 1));
+//    for (int kk = 0; kk < 16; ++kk) {
+//        EXPECT_EQ(A[kk], B[kk]);
+//    }
+    WebRtcSpl_ComplexBitReverse(B, 3);
+    for (int kk = 0; kk < 16; ++kk) {
+        //EXPECT_EQ(A[kk], B[kk]);
+    }
+}
diff --git a/src/common_audio/signal_processing_library/main/source/spl_sqrt.c b/src/common_audio/signal_processing/spl_sqrt.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/spl_sqrt.c
rename to src/common_audio/signal_processing/spl_sqrt.c
diff --git a/src/common_audio/signal_processing_library/main/source/spl_sqrt_floor.c b/src/common_audio/signal_processing/spl_sqrt_floor.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/spl_sqrt_floor.c
rename to src/common_audio/signal_processing/spl_sqrt_floor.c
diff --git a/src/common_audio/signal_processing_library/main/source/spl_version.c b/src/common_audio/signal_processing/spl_version.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/spl_version.c
rename to src/common_audio/signal_processing/spl_version.c
diff --git a/src/common_audio/signal_processing_library/main/source/splitting_filter.c b/src/common_audio/signal_processing/splitting_filter.c
similarity index 95%
rename from src/common_audio/signal_processing_library/main/source/splitting_filter.c
rename to src/common_audio/signal_processing/splitting_filter.c
index 98442f4..f1acf67 100644
--- a/src/common_audio/signal_processing_library/main/source/splitting_filter.c
+++ b/src/common_audio/signal_processing/splitting_filter.c
@@ -147,13 +147,11 @@
     {
         tmp = filter1[i] + filter2[i] + 1024;
         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);
-        low_band[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
-                tmp, WEBRTC_SPL_WORD16_MIN);
+        low_band[i] = WebRtcSpl_SatW32ToW16(tmp);
 
         tmp = filter1[i] - filter2[i] + 1024;
         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);
-        high_band[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
-                tmp, WEBRTC_SPL_WORD16_MIN);
+        high_band[i] = WebRtcSpl_SatW32ToW16(tmp);
     }
 }
 
@@ -191,10 +189,10 @@
     for (i = 0, k = 0; i < kBandFrameLength; i++)
     {
         tmp = WEBRTC_SPL_RSHIFT_W32(filter2[i] + 512, 10);
-        out_data[k++] = (WebRtc_Word16)WEBRTC_SPL_SAT(32767, tmp, -32768);
+        out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
 
         tmp = WEBRTC_SPL_RSHIFT_W32(filter1[i] + 512, 10);
-        out_data[k++] = (WebRtc_Word16)WEBRTC_SPL_SAT(32767, tmp, -32768);
+        out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
     }
 
 }
diff --git a/src/common_audio/signal_processing_library/main/source/sqrt_of_one_minus_x_squared.c b/src/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/sqrt_of_one_minus_x_squared.c
rename to src/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
diff --git a/src/common_audio/signal_processing_library/main/source/vector_scaling_operations.c b/src/common_audio/signal_processing/vector_scaling_operations.c
similarity index 97%
rename from src/common_audio/signal_processing_library/main/source/vector_scaling_operations.c
rename to src/common_audio/signal_processing/vector_scaling_operations.c
index 47362ee..20d239c 100644
--- a/src/common_audio/signal_processing_library/main/source/vector_scaling_operations.c
+++ b/src/common_audio/signal_processing/vector_scaling_operations.c
@@ -125,7 +125,7 @@
     for (i = 0; i < in_vector_length; i++)
     {
         tmpW32 = WEBRTC_SPL_MUL_16_16_RSFT(*inptr++, gain, right_shifts);
-        ( *outptr++) = (WebRtc_Word16)WEBRTC_SPL_SAT(32767, tmpW32, -32768);
+        (*outptr++) = WebRtcSpl_SatW32ToW16(tmpW32);
     }
 }
 
diff --git a/src/common_audio/signal_processing_library/main/source/webrtc_fft_t_1024_8.c b/src/common_audio/signal_processing/webrtc_fft_t_1024_8.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/webrtc_fft_t_1024_8.c
rename to src/common_audio/signal_processing/webrtc_fft_t_1024_8.c
diff --git a/src/common_audio/signal_processing_library/main/source/webrtc_fft_t_rad.c b/src/common_audio/signal_processing/webrtc_fft_t_rad.c
similarity index 100%
rename from src/common_audio/signal_processing_library/main/source/webrtc_fft_t_rad.c
rename to src/common_audio/signal_processing/webrtc_fft_t_rad.c
diff --git a/src/common_audio/signal_processing_library/OWNERS b/src/common_audio/signal_processing_library/OWNERS
deleted file mode 100644
index cf595df..0000000
--- a/src/common_audio/signal_processing_library/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-bjornv@google.com
-tlegrand@google.com
-jks@google.com
diff --git a/src/common_audio/signal_processing_library/main/interface/spl_inl.h b/src/common_audio/signal_processing_library/main/interface/spl_inl.h
deleted file mode 100644
index 8716ca9..0000000
--- a/src/common_audio/signal_processing_library/main/interface/spl_inl.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-// This header file includes the inline functions in
-// the fix point signal processing library.
-
-#ifndef WEBRTC_SPL_SPL_INL_H_
-#define WEBRTC_SPL_SPL_INL_H_
-
-#ifdef WEBRTC_SPL_INLINE_CALLS
-
-#ifdef WEBRTC_ANDROID
-
-WEBRTC_INLINE WebRtc_Word32 WEBRTC_SPL_MUL(WebRtc_Word32 a, WebRtc_Word32 b)
-{
-    WebRtc_Word32 tmp;
-    __asm__("mul %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
-    return tmp;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WEBRTC_SPL_MUL_16_32_RSFT16(WebRtc_Word16 a,
-                                                        WebRtc_Word32 b)
-{
-    WebRtc_Word32 tmp;
-    __asm__("smulwb %0, %1, %2":"=r"(tmp):"r"(b), "r"(a));
-    return tmp;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WEBRTC_SPL_MUL_32_32_RSFT32(WebRtc_Word16 a,
-                                                      WebRtc_Word16 b,
-                                                      WebRtc_Word32 c)
-{
-    WebRtc_Word32 tmp;
-    __asm__("pkhbt %0, %1, %2, lsl #16" : "=r"(tmp) : "r"(b), "r"(a));
-    __asm__("smmul %0, %1, %2":"=r"(tmp):"r"(tmp), "r"(c));
-    return tmp;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WEBRTC_SPL_MUL_32_32_RSFT32BI(
-        WebRtc_Word32 a,
-        WebRtc_Word32 b)
-{
-    WebRtc_Word32 tmp;
-    __asm__("smmul %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
-    return tmp;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WEBRTC_SPL_MUL_16_16(WebRtc_Word16 a,
-                                                 WebRtc_Word16 b)
-{
-    WebRtc_Word32 tmp;
-    __asm__("smulbb %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
-    return tmp;
-}
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 a,
-                                                WebRtc_Word16 b)
-{
-    WebRtc_Word32 s_sum;
-
-    __asm__("qadd16 %0, %1, %2":"=r"(s_sum):"r"(a), "r"(b));
-
-    return (WebRtc_Word16) s_sum;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 l_var1,
-                                                WebRtc_Word32 l_var2)
-{
-    WebRtc_Word32 l_sum;
-
-    __asm__("qadd %0, %1, %2":"=r"(l_sum):"r"(l_var1), "r"(l_var2));
-
-    return l_sum;
-}
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_SubSatW16(WebRtc_Word16 var1,
-                                                WebRtc_Word16 var2)
-{
-    WebRtc_Word32 s_sub;
-
-    __asm__("qsub16 %0, %1, %2":"=r"(s_sub):"r"(var1), "r"(var2));
-
-    return (WebRtc_Word16)s_sub;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 l_var1,
-                                                WebRtc_Word32 l_var2)
-{
-    WebRtc_Word32 l_sub;
-
-    __asm__("qsub %0, %1, %2":"=r"(l_sub):"r"(l_var1), "r"(l_var2));
-
-    return l_sub;
-}
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 n)
-{
-    WebRtc_Word32 tmp;
-
-    __asm__("clz %0, %1":"=r"(tmp):"r"(n));
-
-    return (WebRtc_Word16)(32 - tmp);
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormW32(WebRtc_Word32 a)
-{
-    WebRtc_Word32 tmp;
-
-    if (a <= 0) a ^= 0xFFFFFFFF;
-
-    __asm__("clz %0, %1":"=r"(tmp):"r"(a));
-
-    return tmp - 1;
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormW16(WebRtc_Word16 a)
-{
-    int zeros;
-
-    if (a <= 0) a ^= 0xFFFF;
-
-    if (!(0xFF80 & a)) zeros = 8; else zeros = 0;
-    if (!(0xF800 & (a << zeros))) zeros += 4;
-    if (!(0xE000 & (a << zeros))) zeros += 2;
-    if (!(0xC000 & (a << zeros))) zeros += 1;
-
-    return zeros;
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormU32(WebRtc_UWord32 a)
-{
-    int tmp;
-
-    if (a == 0) return 0;
-
-    __asm__("clz %0, %1":"=r"(tmp):"r"(a));
-
-    return tmp;
-}
-
-#else
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 a,
-                                                WebRtc_Word16 b)
-{
-    WebRtc_Word32 s_sum = (WebRtc_Word32) a + (WebRtc_Word32) b;
-
-    if (s_sum > WEBRTC_SPL_WORD16_MAX)
-    s_sum = WEBRTC_SPL_WORD16_MAX;
-    else if (s_sum < WEBRTC_SPL_WORD16_MIN)
-    s_sum = WEBRTC_SPL_WORD16_MIN;
-
-    return (WebRtc_Word16)s_sum;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 l_var1,
-                                                WebRtc_Word32 l_var2)
-{
-    WebRtc_Word32 l_sum;
-
-    // perform long addition
-    l_sum = l_var1 + l_var2;
-
-    // check for under or overflow
-    if (WEBRTC_SPL_IS_NEG (l_var1))
-    {
-        if (WEBRTC_SPL_IS_NEG (l_var2) && !WEBRTC_SPL_IS_NEG (l_sum))
-        {
-            l_sum = (WebRtc_Word32)0x80000000;
-        }
-    }
-    else
-    {
-        if (!WEBRTC_SPL_IS_NEG (l_var2) && WEBRTC_SPL_IS_NEG (l_sum))
-        {
-            l_sum = (WebRtc_Word32)0x7FFFFFFF;
-        }
-    }
-
-    return l_sum;
-}
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_SubSatW16( WebRtc_Word16 var1,
-                                                 WebRtc_Word16 var2)
-{
-    WebRtc_Word32 l_diff;
-    WebRtc_Word16 s_diff;
-
-    // perform subtraction
-    l_diff = (WebRtc_Word32)var1 - (WebRtc_Word32)var2;
-
-    // default setting
-    s_diff = (WebRtc_Word16) l_diff;
-
-    // check for overflow
-    if (l_diff > (WebRtc_Word32)32767)
-    s_diff = (WebRtc_Word16)32767;
-
-    // check for underflow
-    if (l_diff < (WebRtc_Word32)-32768)
-    s_diff = (WebRtc_Word16)-32768;
-
-    return s_diff;
-}
-
-WEBRTC_INLINE WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 l_var1,
-                                                WebRtc_Word32 l_var2)
-{
-    WebRtc_Word32 l_diff;
-
-    // perform subtraction
-    l_diff = l_var1 - l_var2;
-
-    // check for underflow
-    if ((l_var1 < 0) && (l_var2 > 0) && (l_diff > 0))
-    l_diff = (WebRtc_Word32)0x80000000;
-    // check for overflow
-    if ((l_var1 > 0) && (l_var2 < 0) && (l_diff < 0))
-    l_diff = (WebRtc_Word32)0x7FFFFFFF;
-
-    return l_diff;
-}
-
-WEBRTC_INLINE WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 n)
-{
-
-    int bits;
-
-    if ((0xFFFF0000 & n)) bits = 16; else bits = 0;
-    if ((0x0000FF00 & (n >> bits))) bits += 8;
-    if ((0x000000F0 & (n >> bits))) bits += 4;
-    if ((0x0000000C & (n >> bits))) bits += 2;
-    if ((0x00000002 & (n >> bits))) bits += 1;
-    if ((0x00000001 & (n >> bits))) bits += 1;
-
-    return bits;
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormW32(WebRtc_Word32 a)
-{
-    int zeros;
-
-    if (a <= 0) a ^= 0xFFFFFFFF;
-
-    if (!(0xFFFF8000 & a)) zeros = 16; else zeros = 0;
-    if (!(0xFF800000 & (a << zeros))) zeros += 8;
-    if (!(0xF8000000 & (a << zeros))) zeros += 4;
-    if (!(0xE0000000 & (a << zeros))) zeros += 2;
-    if (!(0xC0000000 & (a << zeros))) zeros += 1;
-
-    return zeros;
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormW16(WebRtc_Word16 a)
-{
-    int zeros;
-
-    if (a <= 0) a ^= 0xFFFF;
-
-    if (!(0xFF80 & a)) zeros = 8; else zeros = 0;
-    if (!(0xF800 & (a << zeros))) zeros += 4;
-    if (!(0xE000 & (a << zeros))) zeros += 2;
-    if (!(0xC000 & (a << zeros))) zeros += 1;
-
-    return zeros;
-}
-
-WEBRTC_INLINE int WebRtcSpl_NormU32(WebRtc_UWord32 a)
-{
-    int zeros;
-
-    if (a == 0) return 0;
-
-    if (!(0xFFFF0000 & a)) zeros = 16; else zeros = 0;
-    if (!(0xFF000000 & (a << zeros))) zeros += 8;
-    if (!(0xF0000000 & (a << zeros))) zeros += 4;
-    if (!(0xC0000000 & (a << zeros))) zeros += 2;
-    if (!(0x80000000 & (a << zeros))) zeros += 1;
-
-    return zeros;
-}
-
-#endif // WEBRTC_ANDROID
-#endif // WEBRTC_SPL_INLINE_CALLS
-#endif // WEBRTC_SPL_SPL_INL_H_
diff --git a/src/common_audio/signal_processing_library/main/source/add_sat_w16.c b/src/common_audio/signal_processing_library/main/source/add_sat_w16.c
deleted file mode 100644
index d103999..0000000
--- a/src/common_audio/signal_processing_library/main/source/add_sat_w16.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_AddSatW16().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-WebRtc_Word16 WebRtcSpl_AddSatW16(WebRtc_Word16 var1, WebRtc_Word16 var2)
-{
-    WebRtc_Word32 s_sum = (WebRtc_Word32)var1 + (WebRtc_Word32)var2;
-
-    if (s_sum > WEBRTC_SPL_WORD16_MAX)
-        s_sum = WEBRTC_SPL_WORD16_MAX;
-    else if (s_sum < WEBRTC_SPL_WORD16_MIN)
-        s_sum = WEBRTC_SPL_WORD16_MIN;
-
-    return (WebRtc_Word16)s_sum;
-}
-
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/add_sat_w32.c b/src/common_audio/signal_processing_library/main/source/add_sat_w32.c
deleted file mode 100644
index 6d83e75..0000000
--- a/src/common_audio/signal_processing_library/main/source/add_sat_w32.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_AddSatW32().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-WebRtc_Word32 WebRtcSpl_AddSatW32(WebRtc_Word32 var1, WebRtc_Word32 var2)
-{
-    WebRtc_Word32 l_sum;
-
-    // perform long addition
-    l_sum = var1 + var2;
-
-    // check for under or overflow
-    if (WEBRTC_SPL_IS_NEG(var1))
-    {
-        if (WEBRTC_SPL_IS_NEG(var2) && !WEBRTC_SPL_IS_NEG(l_sum))
-        {
-            l_sum = (WebRtc_Word32)0x80000000;
-        }
-    } else
-    {
-        if (!WEBRTC_SPL_IS_NEG(var2) && WEBRTC_SPL_IS_NEG(l_sum))
-        {
-            l_sum = (WebRtc_Word32)0x7FFFFFFF;
-        }
-    }
-
-    return l_sum;
-}
-
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/complex_fft.c b/src/common_audio/signal_processing_library/main/source/complex_fft.c
deleted file mode 100644
index b6f0c4e..0000000
--- a/src/common_audio/signal_processing_library/main/source/complex_fft.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_ComplexFFT().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#define CFFTSFT 14
-#define CFFTRND 1
-#define CFFTRND2 16384
-
-#if (defined ARM9E_GCC) || (defined ARM_WINM) || (defined ANDROID_AECOPT)
-extern "C" int FFT_4OFQ14(void *src, void *dest, int NC, int shift);
-
-// For detailed description of the fft functions, check the readme files in fft_ARM9E folder.
-int WebRtcSpl_ComplexFFT2(WebRtc_Word16 frfi[], WebRtc_Word16 frfiOut[], int stages, int mode)
-{
-    return FFT_4OFQ14(frfi, frfiOut, 1 << stages, 0);
-}
-#endif
-
-int WebRtcSpl_ComplexFFT(WebRtc_Word16 frfi[], int stages, int mode)
-{
-    int i, j, l, k, istep, n, m;
-    WebRtc_Word16 wr, wi;
-    WebRtc_Word32 tr32, ti32, qr32, qi32;
-
-    /* The 1024-value is a constant given from the size of WebRtcSpl_kSinTable1024[],
-     * and should not be changed depending on the input parameter 'stages'
-     */
-    n = 1 << stages;
-    if (n > 1024)
-        return -1;
-
-    l = 1;
-    k = 10 - 1; /* Constant for given WebRtcSpl_kSinTable1024[]. Do not change
-         depending on the input parameter 'stages' */
-
-    if (mode == 0)
-    {
-        // mode==0: Low-complexity and Low-accuracy mode
-        while (l < n)
-        {
-            istep = l << 1;
-
-            for (m = 0; m < l; ++m)
-            {
-                j = m << k;
-
-                /* The 256-value is a constant given as 1/4 of the size of
-                 * WebRtcSpl_kSinTable1024[], and should not be changed depending on the input
-                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
-                 */
-                wr = WebRtcSpl_kSinTable1024[j + 256];
-                wi = -WebRtcSpl_kSinTable1024[j];
-
-                for (i = m; i < n; i += istep)
-                {
-                    j = i + l;
-
-                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j])
-                            - WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j + 1])), 15);
-
-                    ti32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j + 1])
-                            + WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j])), 15);
-
-                    qr32 = (WebRtc_Word32)frfi[2 * i];
-                    qi32 = (WebRtc_Word32)frfi[2 * i + 1];
-                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 - tr32, 1);
-                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 - ti32, 1);
-                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 + tr32, 1);
-                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 + ti32, 1);
-                }
-            }
-
-            --k;
-            l = istep;
-
-        }
-
-    } else
-    {
-        // mode==1: High-complexity and High-accuracy mode
-        while (l < n)
-        {
-            istep = l << 1;
-
-            for (m = 0; m < l; ++m)
-            {
-                j = m << k;
-
-                /* The 256-value is a constant given as 1/4 of the size of
-                 * WebRtcSpl_kSinTable1024[], and should not be changed depending on the input
-                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
-                 */
-                wr = WebRtcSpl_kSinTable1024[j + 256];
-                wi = -WebRtcSpl_kSinTable1024[j];
-
-                for (i = m; i < n; i += istep)
-                {
-                    j = i + l;
-
-                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j])
-                            - WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j + 1]) + CFFTRND),
-                            15 - CFFTSFT);
-
-                    ti32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16(wr, frfi[2 * j + 1])
-                            + WEBRTC_SPL_MUL_16_16(wi, frfi[2 * j]) + CFFTRND), 15 - CFFTSFT);
-
-                    qr32 = ((WebRtc_Word32)frfi[2 * i]) << CFFTSFT;
-                    qi32 = ((WebRtc_Word32)frfi[2 * i + 1]) << CFFTSFT;
-                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qr32 - tr32 + CFFTRND2), 1 + CFFTSFT);
-                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qi32 - ti32 + CFFTRND2), 1 + CFFTSFT);
-                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qr32 + tr32 + CFFTRND2), 1 + CFFTSFT);
-                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qi32 + ti32 + CFFTRND2), 1 + CFFTSFT);
-                }
-            }
-
-            --k;
-            l = istep;
-        }
-    }
-    return 0;
-}
diff --git a/src/common_audio/signal_processing_library/main/source/complex_ifft.c b/src/common_audio/signal_processing_library/main/source/complex_ifft.c
deleted file mode 100644
index 184b8de..0000000
--- a/src/common_audio/signal_processing_library/main/source/complex_ifft.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_ComplexIFFT().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#define CIFFTSFT 14
-#define CIFFTRND 1
-
-#if (defined ARM9E_GCC) || (defined ARM_WINM) || (defined ANDROID_AECOPT)
-extern "C" int FFT_4OIQ14(void *src, void *dest, int NC, int shift);
-
-// For detailed description of the fft functions, check the readme files in fft_ARM9E folder.
-int WebRtcSpl_ComplexIFFT2(WebRtc_Word16 frfi[], WebRtc_Word16 frfiOut[], int stages, int mode)
-{
-    FFT_4OIQ14(frfi, frfiOut, 1 << stages, 0);
-    return 0;
-}
-#endif
-
-int WebRtcSpl_ComplexIFFT(WebRtc_Word16 frfi[], int stages, int mode)
-{
-    int i, j, l, k, istep, n, m, scale, shift;
-    WebRtc_Word16 wr, wi;
-    WebRtc_Word32 tr32, ti32, qr32, qi32;
-    WebRtc_Word32 tmp32, round2;
-
-    /* The 1024-value is a constant given from the size of WebRtcSpl_kSinTable1024[],
-     * and should not be changed depending on the input parameter 'stages'
-     */
-    n = 1 << stages;
-    if (n > 1024)
-        return -1;
-
-    scale = 0;
-
-    l = 1;
-    k = 10 - 1; /* Constant for given WebRtcSpl_kSinTable1024[]. Do not change
-         depending on the input parameter 'stages' */
-
-    while (l < n)
-    {
-        // variable scaling, depending upon data
-        shift = 0;
-        round2 = 8192;
-
-        tmp32 = (WebRtc_Word32)WebRtcSpl_MaxAbsValueW16(frfi, 2 * n);
-        if (tmp32 > 13573)
-        {
-            shift++;
-            scale++;
-            round2 <<= 1;
-        }
-        if (tmp32 > 27146)
-        {
-            shift++;
-            scale++;
-            round2 <<= 1;
-        }
-
-        istep = l << 1;
-
-        if (mode == 0)
-        {
-            // mode==0: Low-complexity and Low-accuracy mode
-            for (m = 0; m < l; ++m)
-            {
-                j = m << k;
-
-                /* The 256-value is a constant given as 1/4 of the size of
-                 * WebRtcSpl_kSinTable1024[], and should not be changed depending on the input
-                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
-                 */
-                wr = WebRtcSpl_kSinTable1024[j + 256];
-                wi = WebRtcSpl_kSinTable1024[j];
-
-                for (i = m; i < n; i += istep)
-                {
-                    j = i + l;
-
-                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j], 0)
-                            - WEBRTC_SPL_MUL_16_16_RSFT(wi, frfi[2 * j + 1], 0)), 15);
-
-                    ti32 = WEBRTC_SPL_RSHIFT_W32(
-                            (WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j + 1], 0)
-                                    + WEBRTC_SPL_MUL_16_16_RSFT(wi,frfi[2*j],0)), 15);
-
-                    qr32 = (WebRtc_Word32)frfi[2 * i];
-                    qi32 = (WebRtc_Word32)frfi[2 * i + 1];
-                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 - tr32, shift);
-                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 - ti32, shift);
-                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qr32 + tr32, shift);
-                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(qi32 + ti32, shift);
-                }
-            }
-        } else
-        {
-            // mode==1: High-complexity and High-accuracy mode
-
-            for (m = 0; m < l; ++m)
-            {
-                j = m << k;
-
-                /* The 256-value is a constant given as 1/4 of the size of
-                 * WebRtcSpl_kSinTable1024[], and should not be changed depending on the input
-                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
-                 */
-                wr = WebRtcSpl_kSinTable1024[j + 256];
-                wi = WebRtcSpl_kSinTable1024[j];
-
-                for (i = m; i < n; i += istep)
-                {
-                    j = i + l;
-
-                    tr32 = WEBRTC_SPL_RSHIFT_W32((WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j], 0)
-                            - WEBRTC_SPL_MUL_16_16_RSFT(wi, frfi[2 * j + 1], 0) + CIFFTRND),
-                            15 - CIFFTSFT);
-
-                    ti32 = WEBRTC_SPL_RSHIFT_W32(
-                                    (WEBRTC_SPL_MUL_16_16_RSFT(wr, frfi[2 * j + 1], 0)
-                                            + WEBRTC_SPL_MUL_16_16_RSFT(wi, frfi[2 * j], 0)
-                                            + CIFFTRND), 15 - CIFFTSFT);
-
-                    qr32 = ((WebRtc_Word32)frfi[2 * i]) << CIFFTSFT;
-                    qi32 = ((WebRtc_Word32)frfi[2 * i + 1]) << CIFFTSFT;
-                    frfi[2 * j] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((qr32 - tr32+round2),
-                                                                       shift+CIFFTSFT);
-                    frfi[2 * j + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qi32 - ti32 + round2), shift + CIFFTSFT);
-                    frfi[2 * i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((qr32 + tr32 + round2),
-                                                                       shift + CIFFTSFT);
-                    frfi[2 * i + 1] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(
-                            (qi32 + ti32 + round2), shift + CIFFTSFT);
-                }
-            }
-
-        }
-        --k;
-        l = istep;
-    }
-    return scale;
-}
diff --git a/src/common_audio/signal_processing_library/main/source/cos_table.c b/src/common_audio/signal_processing_library/main/source/cos_table.c
deleted file mode 100644
index 7dba4b0..0000000
--- a/src/common_audio/signal_processing_library/main/source/cos_table.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the 360 degree cos table.
- *
- */
-
-#include "signal_processing_library.h"
-
-WebRtc_Word16 WebRtcSpl_kCosTable[] = {
-        8192,  8190,  8187,  8180,  8172,  8160,  8147,  8130,  8112,
-        8091,  8067,  8041,  8012,  7982,  7948,  7912,  7874,  7834,
-        7791,  7745,  7697,  7647,  7595,  7540,  7483,  7424,  7362,
-        7299,  7233,  7164,  7094,  7021,  6947,  6870,  6791,  6710,
-        6627,  6542,  6455,  6366,  6275,  6182,  6087,  5991,  5892,
-        5792,  5690,  5586,  5481,  5374,  5265,  5155,  5043,  4930,
-        4815,  4698,  4580,  4461,  4341,  4219,  4096,  3971,  3845,
-        3719,  3591,  3462,  3331,  3200,  3068,  2935,  2801,  2667,
-        2531,  2395,  2258,  2120,  1981,  1842,  1703,  1563,  1422,
-        1281,  1140,   998,   856,   713,   571,   428,   285,   142,
-           0,  -142,  -285,  -428,  -571,  -713,  -856,  -998, -1140,
-       -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395,
-       -2531, -2667, -2801, -2935, -3068, -3200, -3331, -3462, -3591,
-       -3719, -3845, -3971, -4095, -4219, -4341, -4461, -4580, -4698,
-       -4815, -4930, -5043, -5155, -5265, -5374, -5481, -5586, -5690,
-       -5792, -5892, -5991, -6087, -6182, -6275, -6366, -6455, -6542,
-       -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164, -7233,
-       -7299, -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745,
-       -7791, -7834, -7874, -7912, -7948, -7982, -8012, -8041, -8067,
-       -8091, -8112, -8130, -8147, -8160, -8172, -8180, -8187, -8190,
-       -8191, -8190, -8187, -8180, -8172, -8160, -8147, -8130, -8112,
-       -8091, -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834,
-       -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362,
-       -7299, -7233, -7164, -7094, -7021, -6947, -6870, -6791, -6710,
-       -6627, -6542, -6455, -6366, -6275, -6182, -6087, -5991, -5892,
-       -5792, -5690, -5586, -5481, -5374, -5265, -5155, -5043, -4930,
-       -4815, -4698, -4580, -4461, -4341, -4219, -4096, -3971, -3845,
-       -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801, -2667,
-       -2531, -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422,
-       -1281, -1140,  -998,  -856,  -713,  -571,  -428,  -285,  -142,
-           0,   142,   285,   428,   571,   713,   856,   998,  1140,
-        1281,  1422,  1563,  1703,  1842,  1981,  2120,  2258,  2395,
-        2531,  2667,  2801,  2935,  3068,  3200,  3331,  3462,  3591,
-        3719,  3845,  3971,  4095,  4219,  4341,  4461,  4580,  4698,
-        4815,  4930,  5043,  5155,  5265,  5374,  5481,  5586,  5690,
-        5792,  5892,  5991,  6087,  6182,  6275,  6366,  6455,  6542,
-        6627,  6710,  6791,  6870,  6947,  7021,  7094,  7164,  7233,
-        7299,  7362,  7424,  7483,  7540,  7595,  7647,  7697,  7745,
-        7791,  7834,  7874,  7912,  7948,  7982,  8012,  8041,  8067,
-        8091,  8112,  8130,  8147,  8160,  8172,  8180,  8187,  8190
-};
diff --git a/src/common_audio/signal_processing_library/main/source/get_hanning_window.c b/src/common_audio/signal_processing_library/main/source/get_hanning_window.c
deleted file mode 100644
index 2845c83..0000000
--- a/src/common_audio/signal_processing_library/main/source/get_hanning_window.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_GetHanningWindow().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-void WebRtcSpl_GetHanningWindow(WebRtc_Word16 *v, WebRtc_Word16 size)
-{
-    int jj;
-    WebRtc_Word16 *vptr1;
-
-    WebRtc_Word32 index;
-    WebRtc_Word32 factor = ((WebRtc_Word32)0x40000000);
-
-    factor = WebRtcSpl_DivW32W16(factor, size);
-    if (size < 513)
-        index = (WebRtc_Word32)-0x200000;
-    else
-        index = (WebRtc_Word32)-0x100000;
-    vptr1 = v;
-
-    for (jj = 0; jj < size; jj++)
-    {
-        index += factor;
-        (*vptr1++) = WebRtcSpl_kHanningTable[index >> 22];
-    }
-
-}
diff --git a/src/common_audio/signal_processing_library/main/source/get_size_in_bits.c b/src/common_audio/signal_processing_library/main/source/get_size_in_bits.c
deleted file mode 100644
index 53853f0..0000000
--- a/src/common_audio/signal_processing_library/main/source/get_size_in_bits.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_GetSizeInBits().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-WebRtc_Word16 WebRtcSpl_GetSizeInBits(WebRtc_UWord32 value)
-{
-
-    int bits = 0;
-
-    // Fast binary search to find the number of bits used
-    if ((0xFFFF0000 & value))
-        bits = 16;
-    if ((0x0000FF00 & (value >> bits)))
-        bits += 8;
-    if ((0x000000F0 & (value >> bits)))
-        bits += 4;
-    if ((0x0000000C & (value >> bits)))
-        bits += 2;
-    if ((0x00000002 & (value >> bits)))
-        bits += 1;
-    if ((0x00000001 & (value >> bits)))
-        bits += 1;
-
-    return bits;
-}
-
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/norm_u32.c b/src/common_audio/signal_processing_library/main/source/norm_u32.c
deleted file mode 100644
index c903a64..0000000
--- a/src/common_audio/signal_processing_library/main/source/norm_u32.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_NormU32().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-int WebRtcSpl_NormU32(WebRtc_UWord32 value)
-{
-    int zeros = 0;
-
-    if (value == 0)
-        return 0;
-
-    if (!(0xFFFF0000 & value))
-        zeros = 16;
-    if (!(0xFF000000 & (value << zeros)))
-        zeros += 8;
-    if (!(0xF0000000 & (value << zeros)))
-        zeros += 4;
-    if (!(0xC0000000 & (value << zeros)))
-        zeros += 2;
-    if (!(0x80000000 & (value << zeros)))
-        zeros += 1;
-
-    return zeros;
-}
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/norm_w16.c b/src/common_audio/signal_processing_library/main/source/norm_w16.c
deleted file mode 100644
index be6711d..0000000
--- a/src/common_audio/signal_processing_library/main/source/norm_w16.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_NormW16().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-int WebRtcSpl_NormW16(WebRtc_Word16 value)
-{
-    int zeros = 0;
-
-    if (value <= 0)
-        value ^= 0xFFFF;
-
-    if ( !(0xFF80 & value))
-        zeros = 8;
-    if ( !(0xF800 & (value << zeros)))
-        zeros += 4;
-    if ( !(0xE000 & (value << zeros)))
-        zeros += 2;
-    if ( !(0xC000 & (value << zeros)))
-        zeros += 1;
-
-    return zeros;
-}
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/norm_w32.c b/src/common_audio/signal_processing_library/main/source/norm_w32.c
deleted file mode 100644
index d456335..0000000
--- a/src/common_audio/signal_processing_library/main/source/norm_w32.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_NormW32().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-int WebRtcSpl_NormW32(WebRtc_Word32 value)
-{
-    int zeros = 0;
-
-    if (value <= 0)
-        value ^= 0xFFFFFFFF;
-
-    // Fast binary search to determine the number of left shifts required to 32-bit normalize
-    // the value
-    if (!(0xFFFF8000 & value))
-        zeros = 16;
-    if (!(0xFF800000 & (value << zeros)))
-        zeros += 8;
-    if (!(0xF8000000 & (value << zeros)))
-        zeros += 4;
-    if (!(0xE0000000 & (value << zeros)))
-        zeros += 2;
-    if (!(0xC0000000 & (value << zeros)))
-        zeros += 1;
-
-    return zeros;
-}
-
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/randomization_functions.c b/src/common_audio/signal_processing_library/main/source/randomization_functions.c
deleted file mode 100644
index 6bc87c7..0000000
--- a/src/common_audio/signal_processing_library/main/source/randomization_functions.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains implementations of the randomization functions
- * WebRtcSpl_IncreaseSeed()
- * WebRtcSpl_RandU()
- * WebRtcSpl_RandN()
- * WebRtcSpl_RandUArray()
- *
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-WebRtc_UWord32 WebRtcSpl_IncreaseSeed(WebRtc_UWord32 *seed)
-{
-    seed[0] = (seed[0] * ((WebRtc_Word32)69069) + 1) & (WEBRTC_SPL_MAX_SEED_USED - 1);
-    return seed[0];
-}
-
-WebRtc_Word16 WebRtcSpl_RandU(WebRtc_UWord32 *seed)
-{
-    return (WebRtc_Word16)(WebRtcSpl_IncreaseSeed(seed) >> 16);
-}
-
-WebRtc_Word16 WebRtcSpl_RandN(WebRtc_UWord32 *seed)
-{
-    return WebRtcSpl_kRandNTable[WebRtcSpl_IncreaseSeed(seed) >> 23];
-}
-
-// Creates an array of uniformly distributed variables
-WebRtc_Word16 WebRtcSpl_RandUArray(WebRtc_Word16* vector,
-                                   WebRtc_Word16 vector_length,
-                                   WebRtc_UWord32* seed)
-{
-    int i;
-    for (i = 0; i < vector_length; i++)
-    {
-        vector[i] = WebRtcSpl_RandU(seed);
-    }
-    return vector_length;
-}
diff --git a/src/common_audio/signal_processing_library/main/source/resample_by_2.c b/src/common_audio/signal_processing_library/main/source/resample_by_2.c
deleted file mode 100644
index 7ed4cfd..0000000
--- a/src/common_audio/signal_processing_library/main/source/resample_by_2.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the resampling by two functions.
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-// allpass filter coefficients.
-static const WebRtc_UWord16 kResampleAllpass1[3] = {3284, 24441, 49528};
-static const WebRtc_UWord16 kResampleAllpass2[3] = {12199, 37471, 60255};
-
-// decimator
-void WebRtcSpl_DownsampleBy2(const WebRtc_Word16* in, const WebRtc_Word16 len,
-                             WebRtc_Word16* out, WebRtc_Word32* filtState)
-{
-    const WebRtc_Word16 *inptr;
-    WebRtc_Word16 *outptr;
-    WebRtc_Word32 *state;
-    WebRtc_Word32 tmp1, tmp2, diff, in32, out32;
-    WebRtc_Word16 i;
-
-    // local versions of pointers to input and output arrays
-    inptr = in; // input array
-    outptr = out; // output array (of length len/2)
-    state = filtState; // filter state array; length = 8
-
-    for (i = (len >> 1); i > 0; i--)
-    {
-        // lower allpass filter
-        in32 = (WebRtc_Word32)(*inptr++) << 10;
-        diff = in32 - state[1];
-        tmp1 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[0], diff, state[0] );
-        state[0] = in32;
-        diff = tmp1 - state[2];
-        tmp2 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[1], diff, state[1] );
-        state[1] = tmp1;
-        diff = tmp2 - state[3];
-        state[3] = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[2], diff, state[2] );
-        state[2] = tmp2;
-
-        // upper allpass filter
-        in32 = (WebRtc_Word32)(*inptr++) << 10;
-        diff = in32 - state[5];
-        tmp1 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[0], diff, state[4] );
-        state[4] = in32;
-        diff = tmp1 - state[6];
-        tmp2 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[1], diff, state[5] );
-        state[5] = tmp1;
-        diff = tmp2 - state[7];
-        state[7] = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[2], diff, state[6] );
-        state[6] = tmp2;
-
-        // add two allpass outputs, divide by two and round
-        out32 = (state[3] + state[7] + 1024) >> 11;
-
-        // limit amplitude to prevent wrap-around, and write to output array
-        if (out32 > 32767)
-            *outptr++ = 32767;
-        else if (out32 < -32768)
-            *outptr++ = -32768;
-        else
-            *outptr++ = (WebRtc_Word16)out32;
-    }
-}
-
-void WebRtcSpl_UpsampleBy2(const WebRtc_Word16* in, WebRtc_Word16 len, WebRtc_Word16* out,
-                           WebRtc_Word32* filtState)
-{
-    const WebRtc_Word16 *inptr;
-    WebRtc_Word16 *outptr;
-    WebRtc_Word32 *state;
-    WebRtc_Word32 tmp1, tmp2, diff, in32, out32;
-    WebRtc_Word16 i;
-
-    // local versions of pointers to input and output arrays
-    inptr = in; // input array
-    outptr = out; // output array (of length len*2)
-    state = filtState; // filter state array; length = 8
-
-    for (i = len; i > 0; i--)
-    {
-        // lower allpass filter
-        in32 = (WebRtc_Word32)(*inptr++) << 10;
-        diff = in32 - state[1];
-        tmp1 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[0], diff, state[0] );
-        state[0] = in32;
-        diff = tmp1 - state[2];
-        tmp2 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[1], diff, state[1] );
-        state[1] = tmp1;
-        diff = tmp2 - state[3];
-        state[3] = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass1[2], diff, state[2] );
-        state[2] = tmp2;
-
-        // round; limit amplitude to prevent wrap-around; write to output array
-        out32 = (state[3] + 512) >> 10;
-        if (out32 > 32767)
-            *outptr++ = 32767;
-        else if (out32 < -32768)
-            *outptr++ = -32768;
-        else
-            *outptr++ = (WebRtc_Word16)out32;
-
-        // upper allpass filter
-        diff = in32 - state[5];
-        tmp1 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[0], diff, state[4] );
-        state[4] = in32;
-        diff = tmp1 - state[6];
-        tmp2 = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[1], diff, state[5] );
-        state[5] = tmp1;
-        diff = tmp2 - state[7];
-        state[7] = WEBRTC_SPL_SCALEDIFF32( kResampleAllpass2[2], diff, state[6] );
-        state[6] = tmp2;
-
-        // round; limit amplitude to prevent wrap-around; write to output array
-        out32 = (state[7] + 512) >> 10;
-        if (out32 > 32767)
-            *outptr++ = 32767;
-        else if (out32 < -32768)
-            *outptr++ = -32768;
-        else
-            *outptr++ = (WebRtc_Word16)out32;
-    }
-}
diff --git a/src/common_audio/signal_processing_library/main/source/sin_table.c b/src/common_audio/signal_processing_library/main/source/sin_table.c
deleted file mode 100644
index ea44666..0000000
--- a/src/common_audio/signal_processing_library/main/source/sin_table.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the 360 degree sine table.
- *
- */
-
-#include "signal_processing_library.h"
-
-WebRtc_Word16 WebRtcSpl_kSinTable[] = {
-        0,    142,    285,    428,    571,    713,    856,    998,   1140,
-     1281,   1422,   1563,   1703,   1842,   1981,   2120,   2258,   2395,
-     2531,   2667,   2801,   2935,   3068,   3200,   3331,   3462,   3591,
-     3719,   3845,   3971,   4095,   4219,   4341,   4461,   4580,   4698,
-     4815,   4930,   5043,   5155,   5265,   5374,   5481,   5586,   5690,
-     5792,   5892,   5991,   6087,   6182,   6275,   6366,   6455,   6542,
-     6627,   6710,   6791,   6870,   6947,   7021,   7094,   7164,   7233,
-     7299,   7362,   7424,   7483,   7540,   7595,   7647,   7697,   7745,
-     7791,   7834,   7874,   7912,   7948,   7982,   8012,   8041,   8067,
-     8091,   8112,   8130,   8147,   8160,   8172,   8180,   8187,   8190,
-     8191,   8190,   8187,   8180,   8172,   8160,   8147,   8130,   8112,
-     8091,   8067,   8041,   8012,   7982,   7948,   7912,   7874,   7834,
-     7791,   7745,   7697,   7647,   7595,   7540,   7483,   7424,   7362,
-     7299,   7233,   7164,   7094,   7021,   6947,   6870,   6791,   6710,
-     6627,   6542,   6455,   6366,   6275,   6182,   6087,   5991,   5892,
-     5792,   5690,   5586,   5481,   5374,   5265,   5155,   5043,   4930,
-     4815,   4698,   4580,   4461,   4341,   4219,   4096,   3971,   3845,
-     3719,   3591,   3462,   3331,   3200,   3068,   2935,   2801,   2667,
-     2531,   2395,   2258,   2120,   1981,   1842,   1703,   1563,   1422,
-     1281,   1140,    998,    856,    713,    571,    428,    285,    142,
-        0,   -142,   -285,   -428,   -571,   -713,   -856,   -998,  -1140,
-    -1281,  -1422,  -1563,  -1703,  -1842,  -1981,  -2120,  -2258,  -2395,
-    -2531,  -2667,  -2801,  -2935,  -3068,  -3200,  -3331,  -3462,  -3591,
-    -3719,  -3845,  -3971,  -4095,  -4219,  -4341,  -4461,  -4580,  -4698,
-    -4815,  -4930,  -5043,  -5155,  -5265,  -5374,  -5481,  -5586,  -5690,
-    -5792,  -5892,  -5991,  -6087,  -6182,  -6275,  -6366,  -6455,  -6542,
-    -6627,  -6710,  -6791,  -6870,  -6947,  -7021,  -7094,  -7164,  -7233,
-    -7299,  -7362,  -7424,  -7483,  -7540,  -7595,  -7647,  -7697,  -7745,
-    -7791,  -7834,  -7874,  -7912,  -7948,  -7982,  -8012,  -8041,  -8067,
-    -8091,  -8112,  -8130,  -8147,  -8160,  -8172,  -8180,  -8187,  -8190,
-    -8191,  -8190,  -8187,  -8180,  -8172,  -8160,  -8147,  -8130,  -8112,
-    -8091,  -8067,  -8041,  -8012,  -7982,  -7948,  -7912,  -7874,  -7834,
-    -7791,  -7745,  -7697,  -7647,  -7595,  -7540,  -7483,  -7424,  -7362,
-    -7299,  -7233,  -7164,  -7094,  -7021,  -6947,  -6870,  -6791,  -6710,
-    -6627,  -6542,  -6455,  -6366,  -6275,  -6182,  -6087,  -5991,  -5892,
-    -5792,  -5690,  -5586,  -5481,  -5374,  -5265,  -5155,  -5043,  -4930,
-    -4815,  -4698,  -4580,  -4461,  -4341,  -4219,  -4096,  -3971,  -3845,
-    -3719,  -3591,  -3462,  -3331,  -3200,  -3068,  -2935,  -2801,  -2667,
-    -2531,  -2395,  -2258,  -2120,  -1981,  -1842,  -1703,  -1563,  -1422,
-    -1281,  -1140,   -998,   -856,   -713,   -571,   -428,   -285,   -142
-};
diff --git a/src/common_audio/signal_processing_library/main/source/sin_table_1024.c b/src/common_audio/signal_processing_library/main/source/sin_table_1024.c
deleted file mode 100644
index a2007f9..0000000
--- a/src/common_audio/signal_processing_library/main/source/sin_table_1024.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the 1024 point sine table.
- *
- */
-
-#include "signal_processing_library.h"
-
-WebRtc_Word16 WebRtcSpl_kSinTable1024[] =
-{
-      0,    201,    402,    603,    804,   1005,   1206,   1406,
-   1607,   1808,   2009,   2209,   2410,   2610,   2811,   3011,
-   3211,   3411,   3611,   3811,   4011,   4210,   4409,   4608,
-   4807,   5006,   5205,   5403,   5601,   5799,   5997,   6195,
-   6392,   6589,   6786,   6982,   7179,   7375,   7571,   7766,
-   7961,   8156,   8351,   8545,   8739,   8932,   9126,   9319,
-   9511,   9703,   9895,  10087,  10278,  10469,  10659,  10849,
-  11038,  11227,  11416,  11604,  11792,  11980,  12166,  12353,
-  12539,  12724,  12909,  13094,  13278,  13462,  13645,  13827,
-  14009,  14191,  14372,  14552,  14732,  14911,  15090,  15268,
-  15446,  15623,  15799,  15975,  16150,  16325,  16499,  16672,
-  16845,  17017,  17189,  17360,  17530,  17699,  17868,  18036,
-  18204,  18371,  18537,  18702,  18867,  19031,  19194,  19357,
-  19519,  19680,  19840,  20000,  20159,  20317,  20474,  20631,
-  20787,  20942,  21096,  21249,  21402,  21554,  21705,  21855,
-  22004,  22153,  22301,  22448,  22594,  22739,  22883,  23027,
-  23169,  23311,  23452,  23592,  23731,  23869,  24006,  24143,
-  24278,  24413,  24546,  24679,  24811,  24942,  25072,  25201,
-  25329,  25456,  25582,  25707,  25831,  25954,  26077,  26198,
-  26318,  26437,  26556,  26673,  26789,  26905,  27019,  27132,
-  27244,  27355,  27466,  27575,  27683,  27790,  27896,  28001,
-  28105,  28208,  28309,  28410,  28510,  28608,  28706,  28802,
-  28897,  28992,  29085,  29177,  29268,  29358,  29446,  29534,
-  29621,  29706,  29790,  29873,  29955,  30036,  30116,  30195,
-  30272,  30349,  30424,  30498,  30571,  30643,  30713,  30783,
-  30851,  30918,  30984,  31049,
-  31113,  31175,  31236,  31297,
-  31356,  31413,  31470,  31525,  31580,  31633,  31684,  31735,
-  31785,  31833,  31880,  31926,  31970,  32014,  32056,  32097,
-  32137,  32176,  32213,  32249,  32284,  32318,  32350,  32382,
-  32412,  32441,  32468,  32495,  32520,  32544,  32567,  32588,
-  32609,  32628,  32646,  32662,  32678,  32692,  32705,  32717,
-  32727,  32736,  32744,  32751,  32757,  32761,  32764,  32766,
-  32767,  32766,  32764,  32761,  32757,  32751,  32744,  32736,
-  32727,  32717,  32705,  32692,  32678,  32662,  32646,  32628,
-  32609,  32588,  32567,  32544,  32520,  32495,  32468,  32441,
-  32412,  32382,  32350,  32318,  32284,  32249,  32213,  32176,
-  32137,  32097,  32056,  32014,  31970,  31926,  31880,  31833,
-  31785,  31735,  31684,  31633,  31580,  31525,  31470,  31413,
-  31356,  31297,  31236,  31175,  31113,  31049,  30984,  30918,
-  30851,  30783,  30713,  30643,  30571,  30498,  30424,  30349,
-  30272,  30195,  30116,  30036,  29955,  29873,  29790,  29706,
-  29621,  29534,  29446,  29358,  29268,  29177,  29085,  28992,
-  28897,  28802,  28706,  28608,  28510,  28410,  28309,  28208,
-  28105,  28001,  27896,  27790,  27683,  27575,  27466,  27355,
-  27244,  27132,  27019,  26905,  26789,  26673,  26556,  26437,
-  26318,  26198,  26077,  25954,  25831,  25707,  25582,  25456,
-  25329,  25201,  25072,  24942,  24811,  24679,  24546,  24413,
-  24278,  24143,  24006,  23869,  23731,  23592,  23452,  23311,
-  23169,  23027,  22883,  22739,  22594,  22448,  22301,  22153,
-  22004,  21855,  21705,  21554,  21402,  21249,  21096,  20942,
-  20787,  20631,  20474,  20317,  20159,  20000,  19840,  19680,
-  19519,  19357,  19194,  19031,  18867,  18702,  18537,  18371,
-  18204,  18036,  17868,  17699,  17530,  17360,  17189,  17017,
-  16845,  16672,  16499,  16325,  16150,  15975,  15799,  15623,
-  15446,  15268,  15090,  14911,  14732,  14552,  14372,  14191,
-  14009,  13827,  13645,  13462,  13278,  13094,  12909,  12724,
-  12539,  12353,  12166,  11980,  11792,  11604,  11416,  11227,
-  11038,  10849,  10659,  10469,  10278,  10087,   9895,   9703,
-   9511,   9319,   9126,   8932,   8739,   8545,   8351,   8156,
-   7961,   7766,   7571,   7375,   7179,   6982,   6786,   6589,
-   6392,   6195,   5997,   5799,   5601,   5403,   5205,   5006,
-   4807,   4608,   4409,   4210,   4011,   3811,   3611,   3411,
-   3211,   3011,   2811,   2610,   2410,   2209,   2009,   1808,
-   1607,   1406,   1206,   1005,    804,    603,    402,    201,
-      0,   -201,   -402,   -603,   -804,  -1005,  -1206,  -1406,
-  -1607,  -1808,  -2009,  -2209,  -2410,  -2610,  -2811,  -3011,
-  -3211,  -3411,  -3611,  -3811,  -4011,  -4210,  -4409,  -4608,
-  -4807,  -5006,  -5205,  -5403,  -5601,  -5799,  -5997,  -6195,
-  -6392,  -6589,  -6786,  -6982,  -7179,  -7375,  -7571,  -7766,
-  -7961,  -8156,  -8351,  -8545,  -8739,  -8932,  -9126,  -9319,
-  -9511,  -9703,  -9895, -10087, -10278, -10469, -10659, -10849,
- -11038, -11227, -11416, -11604, -11792, -11980, -12166, -12353,
- -12539, -12724, -12909, -13094, -13278, -13462, -13645, -13827,
- -14009, -14191, -14372, -14552, -14732, -14911, -15090, -15268,
- -15446, -15623, -15799, -15975, -16150, -16325, -16499, -16672,
- -16845, -17017, -17189, -17360, -17530, -17699, -17868, -18036,
- -18204, -18371, -18537, -18702, -18867, -19031, -19194, -19357,
- -19519, -19680, -19840, -20000, -20159, -20317, -20474, -20631,
- -20787, -20942, -21096, -21249, -21402, -21554, -21705, -21855,
- -22004, -22153, -22301, -22448, -22594, -22739, -22883, -23027,
- -23169, -23311, -23452, -23592, -23731, -23869, -24006, -24143,
- -24278, -24413, -24546, -24679, -24811, -24942, -25072, -25201,
- -25329, -25456, -25582, -25707, -25831, -25954, -26077, -26198,
- -26318, -26437, -26556, -26673, -26789, -26905, -27019, -27132,
- -27244, -27355, -27466, -27575, -27683, -27790, -27896, -28001,
- -28105, -28208, -28309, -28410, -28510, -28608, -28706, -28802,
- -28897, -28992, -29085, -29177, -29268, -29358, -29446, -29534,
- -29621, -29706, -29790, -29873, -29955, -30036, -30116, -30195,
- -30272, -30349, -30424, -30498, -30571, -30643, -30713, -30783,
- -30851, -30918, -30984, -31049, -31113, -31175, -31236, -31297,
- -31356, -31413, -31470, -31525, -31580, -31633, -31684, -31735,
- -31785, -31833, -31880, -31926, -31970, -32014, -32056, -32097,
- -32137, -32176, -32213, -32249, -32284, -32318, -32350, -32382,
- -32412, -32441, -32468, -32495, -32520, -32544, -32567, -32588,
- -32609, -32628, -32646, -32662, -32678, -32692, -32705, -32717,
- -32727, -32736, -32744, -32751, -32757, -32761, -32764, -32766,
- -32767, -32766, -32764, -32761, -32757, -32751, -32744, -32736,
- -32727, -32717, -32705, -32692, -32678, -32662, -32646, -32628,
- -32609, -32588, -32567, -32544, -32520, -32495, -32468, -32441,
- -32412, -32382, -32350, -32318, -32284, -32249, -32213, -32176,
- -32137, -32097, -32056, -32014, -31970, -31926, -31880, -31833,
- -31785, -31735, -31684, -31633, -31580, -31525, -31470, -31413,
- -31356, -31297, -31236, -31175, -31113, -31049, -30984, -30918,
- -30851, -30783, -30713, -30643, -30571, -30498, -30424, -30349,
- -30272, -30195, -30116, -30036, -29955, -29873, -29790, -29706,
- -29621, -29534, -29446, -29358, -29268, -29177, -29085, -28992,
- -28897, -28802, -28706, -28608, -28510, -28410, -28309, -28208,
- -28105, -28001, -27896, -27790, -27683, -27575, -27466, -27355,
- -27244, -27132, -27019, -26905, -26789, -26673, -26556, -26437,
- -26318, -26198, -26077, -25954, -25831, -25707, -25582, -25456,
- -25329, -25201, -25072, -24942, -24811, -24679, -24546, -24413,
- -24278, -24143, -24006, -23869, -23731, -23592, -23452, -23311,
- -23169, -23027, -22883, -22739, -22594, -22448, -22301, -22153,
- -22004, -21855, -21705, -21554, -21402, -21249, -21096, -20942,
- -20787, -20631, -20474, -20317, -20159, -20000, -19840, -19680,
- -19519, -19357, -19194, -19031, -18867, -18702, -18537, -18371,
- -18204, -18036, -17868, -17699, -17530, -17360, -17189, -17017,
- -16845, -16672, -16499, -16325, -16150, -15975, -15799, -15623,
- -15446, -15268, -15090, -14911, -14732, -14552, -14372, -14191,
- -14009, -13827, -13645, -13462, -13278, -13094, -12909, -12724,
- -12539, -12353, -12166, -11980, -11792, -11604, -11416, -11227,
- -11038, -10849, -10659, -10469, -10278, -10087,  -9895,  -9703,
-  -9511,  -9319,  -9126,  -8932,  -8739,  -8545,  -8351,  -8156,
-  -7961,  -7766,  -7571,  -7375,  -7179,  -6982,  -6786,  -6589,
-  -6392,  -6195,  -5997,  -5799,  -5601,  -5403,  -5205,  -5006,
-  -4807,  -4608,  -4409,  -4210,  -4011,  -3811,  -3611,  -3411,
-  -3211,  -3011,  -2811,  -2610,  -2410,  -2209,  -2009,  -1808,
-  -1607,  -1406,  -1206,  -1005,   -804,   -603,   -402,   -201,
-};
diff --git a/src/common_audio/signal_processing_library/main/source/sub_sat_w16.c b/src/common_audio/signal_processing_library/main/source/sub_sat_w16.c
deleted file mode 100644
index a48c3d5..0000000
--- a/src/common_audio/signal_processing_library/main/source/sub_sat_w16.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_SubSatW16().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-#ifndef XSCALE_OPT
-
-WebRtc_Word16 WebRtcSpl_SubSatW16(WebRtc_Word16 var1, WebRtc_Word16 var2)
-{
-    WebRtc_Word32 l_diff;
-    WebRtc_Word16 s_diff;
-
-    // perform subtraction
-    l_diff = (WebRtc_Word32)var1 - (WebRtc_Word32)var2;
-
-    // default setting
-    s_diff = (WebRtc_Word16)l_diff;
-
-    // check for overflow
-    if (l_diff > (WebRtc_Word32)32767)
-        s_diff = (WebRtc_Word16)32767;
-
-    // check for underflow
-    if (l_diff < (WebRtc_Word32)-32768)
-        s_diff = (WebRtc_Word16)-32768;
-
-    return s_diff;
-}
-
-#else
-#pragma message(">> WebRtcSpl_SubSatW16.c is excluded from this build")
-#endif // XSCALE_OPT
-#endif // SPL_NO_DOUBLE_IMPLEMENTATIONS
diff --git a/src/common_audio/signal_processing_library/main/source/sub_sat_w32.c b/src/common_audio/signal_processing_library/main/source/sub_sat_w32.c
deleted file mode 100644
index add3675..0000000
--- a/src/common_audio/signal_processing_library/main/source/sub_sat_w32.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the function WebRtcSpl_SubSatW32().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#include "signal_processing_library.h"
-
-#ifndef SPL_NO_DOUBLE_IMPLEMENTATIONS
-
-WebRtc_Word32 WebRtcSpl_SubSatW32(WebRtc_Word32 var1, WebRtc_Word32 var2)
-{
-    WebRtc_Word32 l_diff;
-
-    // perform subtraction
-    l_diff = var1 - var2;
-
-    // check for underflow
-    if ((var1 < 0) && (var2 > 0) && (l_diff > 0))
-        l_diff = (WebRtc_Word32)0x80000000;
-    // check for overflow
-    if ((var1 > 0) && (var2 < 0) && (l_diff < 0))
-        l_diff = (WebRtc_Word32)0x7FFFFFFF;
-
-    return l_diff;
-}
-
-#endif
diff --git a/src/common_audio/signal_processing_library/main/source/webrtc_fft_4ofq14_gcc_android.s b/src/common_audio/signal_processing_library/main/source/webrtc_fft_4ofq14_gcc_android.s
deleted file mode 100644
index c1a893b..0000000
--- a/src/common_audio/signal_processing_library/main/source/webrtc_fft_4ofq14_gcc_android.s
+++ /dev/null
@@ -1,227 +0,0 @@
-  .globl FFT_4OFQ14

-

-FFT_4OFQ14:

-  stmdb       sp!, {r4 - r11, lr}

-  ldr         lr, =s_Q14S_8

-  ldr         lr, [lr]

-  cmp         r2, lr

-  movgt       r0, #1

-  ldmgtia     sp!, {r4 - r11, pc}

-  stmdb       sp!, {r1, r2}

-  mov         r3, #0

-  mov         r2, r2

-

-LBL1:

-  add         r12, r0, r3, lsl #2

-  add         r12, r12, r2, lsr #1

-  ldrsh       r5, [r12, #2]

-  ldrsh       r4, [r12], +r2

-  ldrsh       r9, [r12, #2]

-  ldrsh       r8, [r12], +r2

-  ldrsh       r7, [r12, #2]

-  ldrsh       r6, [r12], +r2

-  ldrsh       r11, [r12, #2]

-  ldrsh       r10, [r12], +r2

-  add         r4, r4, r6

-  add         r5, r5, r7

-  sub         r6, r4, r6, lsl #1

-  sub         r7, r5, r7, lsl #1

-  sub         r12, r8, r10

-  sub         lr, r9, r11

-  add         r10, r8, r10

-  add         r11, r9, r11

-  sub         r9, r4, r10

-  sub         r8, r5, r11

-  add         r4, r4, r10

-  add         r5, r5, r11

-  sub         r10, r6, lr

-  add         r11, r7, r12

-  add         r6, r6, lr

-  sub         r7, r7, r12

-  ldr         lr, =t_Q14R_rad8

-  ldrsh       lr, [lr]

-  stmdb       sp!, {r2}

-  add         r12, r6, r7

-  mul         r6, r12, lr

-  rsb         r12, r12, r7, lsl #1

-  mul         r7, r12, lr

-  sub         r12, r11, r10

-  mul         r10, r12, lr

-  sub         r12, r12, r11, lsl #1

-  mul         r11, r12, lr

-  ldmia       sp!, {r2}

-  stmdb       sp!, {r4 - r11}

-  add         r4, r0, r3, lsl #2

-  ldrsh       r7, [r4, #2]

-  ldrsh       r6, [r4], +r2

-  ldrsh       r11, [r4, #2]

-  ldrsh       r10, [r4], +r2

-  ldrsh       r9, [r4, #2]

-  ldrsh       r8, [r4], +r2

-  ldrsh       lr, [r4, #2]

-  ldrsh       r12, [r4], +r2

-  mov         r7, r7, asr #3

-  mov         r6, r6, asr #3

-  add         r6, r6, r8, asr #3

-  add         r7, r7, r9, asr #3

-  sub         r8, r6, r8, asr #2

-  sub         r9, r7, r9, asr #2

-  sub         r4, r10, r12

-  sub         r5, r11, lr

-  add         r10, r10, r12

-  add         r11, r11, lr

-  add         r6, r6, r10, asr #3

-  add         r7, r7, r11, asr #3

-  sub         r10, r6, r10, asr #2

-  sub         r11, r7, r11, asr #2

-  sub         r12, r8, r5, asr #3

-  add         lr, r9, r4, asr #3

-  add         r8, r8, r5, asr #3

-  sub         r9, r9, r4, asr #3

-  ldmia       sp!, {r4, r5}

-  add         r6, r6, r4, asr #3

-  add         r7, r7, r5, asr #3

-  sub         r4, r6, r4, asr #2

-  sub         r5, r7, r5, asr #2

-  strh        r7, [r1, #2]

-  strh        r6, [r1], #4

-  ldmia       sp!, {r6, r7}

-  add         r8, r8, r6, asr #17

-  add         r9, r9, r7, asr #17

-  sub         r6, r8, r6, asr #16

-  sub         r7, r9, r7, asr #16

-  strh        r9, [r1, #2]

-  strh        r8, [r1], #4

-  ldmia       sp!, {r8, r9}

-  add         r10, r10, r8, asr #3

-  sub         r11, r11, r9, asr #3

-  sub         r8, r10, r8, asr #2

-  add         r9, r11, r9, asr #2

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  ldmia       sp!, {r10, r11}

-  add         r12, r12, r10, asr #17

-  add         lr, lr, r11, asr #17

-  sub         r10, r12, r10, asr #16

-  sub         r11, lr, r11, asr #16

-  strh        lr, [r1, #2]

-  strh        r12, [r1], #4

-  strh        r5, [r1, #2]

-  strh        r4, [r1], #4

-  strh        r7, [r1, #2]

-  strh        r6, [r1], #4

-  strh        r9, [r1, #2]

-  strh        r8, [r1], #4

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  eor         r3, r3, r2, lsr #4

-  tst         r3, r2, lsr #4

-  bne         LBL1

-

-  eor         r3, r3, r2, lsr #5

-  tst         r3, r2, lsr #5

-  bne         LBL1

-

-  mov         r12, r2, lsr #6

-

-LBL2:

-  eor         r3, r3, r12

-  tst         r3, r12

-  bne         LBL1

-

-  movs        r12, r12, lsr #1

-  bne         LBL2

-

-  ldmia       sp!, {r1, r2}

-  mov         r3, r2, lsr #3

-  mov         r2, #0x20

-  ldr         r0, =t_Q14S_8

-  cmp         r3, #1

-  beq         LBL3

-

-LBL6:

-  mov         r3, r3, lsr #2

-  stmdb       sp!, {r1, r3}

-  add         r12, r2, r2, lsl #1

-  add         r1, r1, r12

-  sub         r3, r3, #1, 16

-

-LBL5:

-  add         r3, r3, r2, lsl #14

-

-LBL4:

-  ldrsh       r6, [r0], #2

-  ldrsh       r7, [r0], #2

-  ldrsh       r8, [r0], #2

-  ldrsh       r9, [r0], #2

-  ldrsh       r10, [r0], #2

-  ldrsh       r11, [r0], #2

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r5, r4

-  mul         r12, lr, r11

-  add         lr, r10, r11, lsl #1

-  mla         r11, r5, r10, r12

-  mla         r10, r4, lr, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r5, r4

-  mul         r12, lr, r9

-  add         lr, r8, r9, lsl #1

-  mla         r9, r5, r8, r12

-  mla         r8, r4, lr, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r5, r4

-  mul         r12, lr, r7

-  add         lr, r6, r7, lsl #1

-  mla         r7, r5, r6, r12

-  mla         r6, r4, lr, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1]

-  mov         r5, r5, asr #2

-  mov         r4, r4, asr #2

-  add         r12, r4, r6, asr #16

-  add         lr, r5, r7, asr #16

-  sub         r4, r4, r6, asr #16

-  sub         r5, r5, r7, asr #16

-  add         r6, r8, r10

-  add         r7, r9, r11

-  sub         r8, r8, r10

-  sub         r9, r9, r11

-  add         r10, r12, r6, asr #16

-  add         r11, lr, r7, asr #16

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  add         r10, r4, r9, asr #16

-  sub         r11, r5, r8, asr #16

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  sub         r10, r12, r6, asr #16

-  sub         r11, lr, r7, asr #16

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  sub         r10, r4, r9, asr #16

-  add         r11, r5, r8, asr #16

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  subs        r3, r3, #1, 16

-  bge         LBL4

-  add         r12, r2, r2, lsl #1

-  add         r1, r1, r12

-  sub         r0, r0, r12

-  sub         r3, r3, #1

-  movs        lr, r3, lsl #16

-  bne         LBL5

-  add         r0, r0, r12

-  ldmia       sp!, {r1, r3}

-  mov         r2, r2, lsl #2

-  cmp         r3, #2

-  bgt         LBL6

-

-LBL3:

-  mov         r0, #0

-  ldmia       sp!, {r4 - r11, pc}

-  andeq       r3, r1, r0, lsr #32

-  andeq       r10, r1, r12, ror #31

-  andeq       r3, r1, r8, lsr #32

diff --git a/src/common_audio/signal_processing_library/main/source/webrtc_fft_4oiq14_gcc_android.s b/src/common_audio/signal_processing_library/main/source/webrtc_fft_4oiq14_gcc_android.s
deleted file mode 100644
index cc93291..0000000
--- a/src/common_audio/signal_processing_library/main/source/webrtc_fft_4oiq14_gcc_android.s
+++ /dev/null
@@ -1,221 +0,0 @@
-  .globl FFT_4OIQ14

-

-FFT_4OIQ14:

-  stmdb       sp!, {r4 - r11, lr}

-  ldr         lr, =s_Q14S_8

-  ldr         lr, [lr]

-  cmp         r2, lr

-  movgt       r0, #1

-  ldmgtia     sp!, {r4 - r11, pc}

-  stmdb       sp!, {r1, r2}

-  mov         r3, #0

-  mov         r2, r2

-

-LBL1:

-  add         r12, r0, r3, lsl #2

-  add         r12, r12, r2, lsr #1

-  ldrsh       r5, [r12, #2]

-  ldrsh       r4, [r12], +r2

-  ldrsh       r9, [r12, #2]

-  ldrsh       r8, [r12], +r2

-  ldrsh       r7, [r12, #2]

-  ldrsh       r6, [r12], +r2

-  ldrsh       r11, [r12, #2]

-  ldrsh       r10, [r12], +r2

-  add         r4, r4, r6

-  add         r5, r5, r7

-  sub         r6, r4, r6, lsl #1

-  sub         r7, r5, r7, lsl #1

-  sub         r12, r8, r10

-  sub         lr, r9, r11

-  add         r10, r8, r10

-  add         r11, r9, r11

-  sub         r9, r4, r10

-  sub         r8, r5, r11

-  add         r4, r4, r10

-  add         r5, r5, r11

-  add         r10, r6, lr

-  sub         r11, r7, r12

-  sub         r6, r6, lr

-  add         r7, r7, r12

-  ldr         lr, =t_Q14R_rad8

-  ldrsh       lr, [lr]

-  stmdb       sp!, {r2}

-  sub         r12, r6, r7

-  mul         r6, r12, lr

-  add         r12, r12, r7, lsl #1

-  mul         r7, r12, lr

-  sub         r12, r10, r11

-  mul         r11, r12, lr

-  sub         r12, r12, r10, lsl #1

-  mul         r10, r12, lr

-  ldmia       sp!, {r2}

-  stmdb       sp!, {r4 - r11}

-  add         r4, r0, r3, lsl #2

-  ldrsh       r7, [r4, #2]

-  ldrsh       r6, [r4], +r2

-  ldrsh       r11, [r4, #2]

-  ldrsh       r10, [r4], +r2

-  ldrsh       r9, [r4, #2]

-  ldrsh       r8, [r4], +r2

-  ldrsh       lr, [r4, #2]

-  ldrsh       r12, [r4], +r2

-  add         r6, r6, r8

-  add         r7, r7, r9

-  sub         r8, r6, r8, lsl #1

-  sub         r9, r7, r9, lsl #1

-  sub         r4, r10, r12

-  sub         r5, r11, lr

-  add         r10, r10, r12

-  add         r11, r11, lr

-  add         r6, r6, r10

-  add         r7, r7, r11

-  sub         r10, r6, r10, lsl #1

-  sub         r11, r7, r11, lsl #1

-  add         r12, r8, r5

-  sub         lr, r9, r4

-  sub         r8, r8, r5

-  add         r9, r9, r4

-  ldmia       sp!, {r4, r5}

-  add         r6, r6, r4

-  add         r7, r7, r5

-  sub         r4, r6, r4, lsl #1

-  sub         r5, r7, r5, lsl #1

-  strh        r7, [r1, #2]

-  strh        r6, [r1], #4

-  ldmia       sp!, {r6, r7}

-  add         r8, r8, r6, asr #14

-  add         r9, r9, r7, asr #14

-  sub         r6, r8, r6, asr #13

-  sub         r7, r9, r7, asr #13

-  strh        r9, [r1, #2]

-  strh        r8, [r1], #4

-  ldmia       sp!, {r8, r9}

-  sub         r10, r10, r8

-  add         r11, r11, r9

-  add         r8, r10, r8, lsl #1

-  sub         r9, r11, r9, lsl #1

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  ldmia       sp!, {r10, r11}

-  add         r12, r12, r10, asr #14

-  add         lr, lr, r11, asr #14

-  sub         r10, r12, r10, asr #13

-  sub         r11, lr, r11, asr #13

-  strh        lr, [r1, #2]

-  strh        r12, [r1], #4

-  strh        r5, [r1, #2]

-  strh        r4, [r1], #4

-  strh        r7, [r1, #2]

-  strh        r6, [r1], #4

-  strh        r9, [r1, #2]

-  strh        r8, [r1], #4

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  eor         r3, r3, r2, lsr #4

-  tst         r3, r2, lsr #4

-  bne         LBL1

-  eor         r3, r3, r2, lsr #5

-  tst         r3, r2, lsr #5

-  bne         LBL1

-  mov         r12, r2, lsr #6

-

-

-LBL2:

-  eor         r3, r3, r12

-  tst         r3, r12

-  bne         LBL1

-  movs        r12, r12, lsr #1

-  bne         LBL2

-  ldmia       sp!, {r1, r2}

-  mov         r3, r2, lsr #3

-  mov         r2, #0x20

-  ldr         r0, =t_Q14S_8

-  cmp         r3, #1

-  beq         LBL3

-

-LBL6:

-  mov         r3, r3, lsr #2

-  stmdb       sp!, {r1, r3}

-  add         r12, r2, r2, lsl #1

-  add         r1, r1, r12

-  sub         r3, r3, #1, 16

-

-LBL5:

-  add         r3, r3, r2, lsl #14

-

-LBL4:

-  ldrsh       r6, [r0], #2

-  ldrsh       r7, [r0], #2

-  ldrsh       r8, [r0], #2

-  ldrsh       r9, [r0], #2

-  ldrsh       r10, [r0], #2

-  ldrsh       r11, [r0], #2

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r4, r5

-  mul         r12, lr, r11

-  add         r11, r10, r11, lsl #1

-  mla         r10, r4, r10, r12

-  mla         r11, r5, r11, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r4, r5

-  mul         r12, lr, r9

-  add         r9, r8, r9, lsl #1

-  mla         r8, r4, r8, r12

-  mla         r9, r5, r9, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1], -r2

-  sub         lr, r4, r5

-  mul         r12, lr, r7

-  add         r7, r6, r7, lsl #1

-  mla         r6, r4, r6, r12

-  mla         r7, r5, r7, r12

-  ldrsh       r5, [r1, #2]

-  ldrsh       r4, [r1]

-  add         r12, r4, r6, asr #14

-  add         lr, r5, r7, asr #14

-  sub         r4, r4, r6, asr #14

-  sub         r5, r5, r7, asr #14

-  add         r6, r8, r10

-  add         r7, r9, r11

-  sub         r8, r8, r10

-  sub         r9, r9, r11

-  add         r10, r12, r6, asr #14

-  add         r11, lr, r7, asr #14

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  sub         r10, r4, r9, asr #14

-  add         r11, r5, r8, asr #14

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  sub         r10, r12, r6, asr #14

-  sub         r11, lr, r7, asr #14

-  strh        r11, [r1, #2]

-  strh        r10, [r1], +r2

-  add         r10, r4, r9, asr #14

-  sub         r11, r5, r8, asr #14

-  strh        r11, [r1, #2]

-  strh        r10, [r1], #4

-  subs        r3, r3, #1, 16

-  bge         LBL4

-  add         r12, r2, r2, lsl #1

-  add         r1, r1, r12

-  sub         r0, r0, r12

-  sub         r3, r3, #1

-  movs        lr, r3, lsl #16

-  bne         LBL5

-  add         r0, r0, r12

-  ldmia       sp!, {r1, r3}

-  mov         r2, r2, lsl #2

-  cmp         r3, #2

-  bgt         LBL6

-

-LBL3:

-  mov         r0, #0

-  ldmia       sp!, {r4 - r11, pc}

-  andeq       r3, r1, r0, lsr #32

-  andeq       r10, r1, r12, ror #31

-  andeq       r3, r1, r8, lsr #32

-

diff --git a/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.cc b/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.cc
deleted file mode 100644
index 5adc339..0000000
--- a/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.cc
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file contains the SPL unit_test.
- *
- */
-
-#include "unit_test.h"
-#include "signal_processing_library.h"
-
-class SplEnvironment : public ::testing::Environment {
- public:
-  virtual void SetUp() {
-  }
-  virtual void TearDown() {
-  }
-};
-
-SplTest::SplTest()
-{
-}
-
-void SplTest::SetUp() {
-}
-
-void SplTest::TearDown() {
-}
-
-TEST_F(SplTest, MacroTest) {
-    // Macros with inputs.
-    int A = 10;
-    int B = 21;
-    int a = -3;
-    int b = WEBRTC_SPL_WORD32_MAX;
-    int nr = 2;
-    int d_ptr1 = 0;
-    int d_ptr2 = 0;
-
-    EXPECT_EQ(10, WEBRTC_SPL_MIN(A, B));
-    EXPECT_EQ(21, WEBRTC_SPL_MAX(A, B));
-
-    EXPECT_EQ(3, WEBRTC_SPL_ABS_W16(a));
-    EXPECT_EQ(3, WEBRTC_SPL_ABS_W32(a));
-    EXPECT_EQ(0, WEBRTC_SPL_GET_BYTE(&B, nr));
-    WEBRTC_SPL_SET_BYTE(&d_ptr2, 1, nr);
-    EXPECT_EQ(65536, d_ptr2);
-
-    EXPECT_EQ(-63, WEBRTC_SPL_MUL(a, B));
-    EXPECT_EQ(-2147483645, WEBRTC_SPL_MUL(a, b));
-    EXPECT_EQ(-2147483645, WEBRTC_SPL_UMUL(a, b));
-    b = WEBRTC_SPL_WORD16_MAX >> 1;
-    EXPECT_EQ(65535, WEBRTC_SPL_UMUL_RSFT16(a, b));
-    EXPECT_EQ(1073627139, WEBRTC_SPL_UMUL_16_16(a, b));
-    EXPECT_EQ(16382, WEBRTC_SPL_UMUL_16_16_RSFT16(a, b));
-    EXPECT_EQ(-49149, WEBRTC_SPL_UMUL_32_16(a, b));
-    EXPECT_EQ(65535, WEBRTC_SPL_UMUL_32_16_RSFT16(a, b));
-    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_U16(a, b));
-
-    a = b;
-    b = -3;
-    EXPECT_EQ(-5461, WEBRTC_SPL_DIV(a, b));
-    EXPECT_EQ(0, WEBRTC_SPL_UDIV(a, b));
-
-    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT16(a, b));
-    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT15(a, b));
-    EXPECT_EQ(-3, WEBRTC_SPL_MUL_16_32_RSFT14(a, b));
-    EXPECT_EQ(-24, WEBRTC_SPL_MUL_16_32_RSFT11(a, b));
-
-    int a32 = WEBRTC_SPL_WORD32_MAX;
-    int a32a = (WEBRTC_SPL_WORD32_MAX >> 16);
-    int a32b = (WEBRTC_SPL_WORD32_MAX & 0x0000ffff);
-    EXPECT_EQ(5, WEBRTC_SPL_MUL_32_32_RSFT32(a32a, a32b, A));
-    EXPECT_EQ(5, WEBRTC_SPL_MUL_32_32_RSFT32BI(a32, A));
-
-    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_16(a, b));
-    EXPECT_EQ(-12288, WEBRTC_SPL_MUL_16_16_RSFT(a, b, 2));
-
-    EXPECT_EQ(-12287, WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, 2));
-    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_16_RSFT_WITH_FIXROUND(a, b));
-
-    EXPECT_EQ(16380, WEBRTC_SPL_ADD_SAT_W32(a, b));
-    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, A, B));
-    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, B, A));
-    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_32_16(a, b));
-
-    EXPECT_EQ(16386, WEBRTC_SPL_SUB_SAT_W32(a, b));
-    EXPECT_EQ(16380, WEBRTC_SPL_ADD_SAT_W16(a, b));
-    EXPECT_EQ(16386, WEBRTC_SPL_SUB_SAT_W16(a, b));
-
-    EXPECT_TRUE(WEBRTC_SPL_IS_NEG(b));
-
-    // Shifting with negative numbers allowed
-    // Positive means left shift
-    EXPECT_EQ(32766, WEBRTC_SPL_SHIFT_W16(a, 1));
-    EXPECT_EQ(32766, WEBRTC_SPL_SHIFT_W32(a, 1));
-
-    // Shifting with negative numbers not allowed
-    // We cannot do casting here due to signed/unsigned problem
-    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_W16(a, 1));
-    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_W16(a, 1));
-    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_W32(a, 1));
-    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_W32(a, 1));
-
-    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_U16(a, 1));
-    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_U16(a, 1));
-    EXPECT_EQ(8191, WEBRTC_SPL_RSHIFT_U32(a, 1));
-    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_U32(a, 1));
-
-    EXPECT_EQ(1470, WEBRTC_SPL_RAND(A));
-}
-
-TEST_F(SplTest, InlineTest) {
-
-    WebRtc_Word16 a = 121;
-    WebRtc_Word16 b = -17;
-    WebRtc_Word32 A = 111121;
-    WebRtc_Word32 B = -1711;
-    char* bVersion = (char*) malloc(8);
-
-    EXPECT_EQ(104, WebRtcSpl_AddSatW16(a, b));
-    EXPECT_EQ(138, WebRtcSpl_SubSatW16(a, b));
-
-    EXPECT_EQ(109410, WebRtcSpl_AddSatW32(A, B));
-    EXPECT_EQ(112832, WebRtcSpl_SubSatW32(A, B));
-
-    EXPECT_EQ(17, WebRtcSpl_GetSizeInBits(A));
-    EXPECT_EQ(14, WebRtcSpl_NormW32(A));
-    EXPECT_EQ(4, WebRtcSpl_NormW16(B));
-    EXPECT_EQ(15, WebRtcSpl_NormU32(A));
-
-    EXPECT_EQ(0, WebRtcSpl_get_version(bVersion, 8));
-}
-
-TEST_F(SplTest, MathOperationsTest) {
-
-    int A = 117;
-    WebRtc_Word32 num = 117;
-    WebRtc_Word32 den = -5;
-    WebRtc_UWord16 denU = 5;
-    EXPECT_EQ(10, WebRtcSpl_Sqrt(A));
-    EXPECT_EQ(10, WebRtcSpl_SqrtFloor(A));
-
-
-    EXPECT_EQ(-91772805, WebRtcSpl_DivResultInQ31(den, num));
-    EXPECT_EQ(-23, WebRtcSpl_DivW32W16ResW16(num, (WebRtc_Word16)den));
-    EXPECT_EQ(-23, WebRtcSpl_DivW32W16(num, (WebRtc_Word16)den));
-    EXPECT_EQ(23, WebRtcSpl_DivU32U16(num, denU));
-    EXPECT_EQ(0, WebRtcSpl_DivW32HiLow(128, 0, 256));
-}
-
-TEST_F(SplTest, BasicArrayOperationsTest) {
-
-
-    int B[] = {4, 12, 133, 1100};
-    int Bs[] = {2, 6, 66, 550};
-    WebRtc_UWord8* b8 = (WebRtc_UWord8*) malloc(4);
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* b32 = (WebRtc_Word32*) malloc(4);
-
-    WebRtc_UWord8* bTmp8 = (WebRtc_UWord8*) malloc(4);
-    WebRtc_Word16* bTmp16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* bTmp32 = (WebRtc_Word32*) malloc(4);
-
-    WebRtcSpl_MemSetW16(b16, 3, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(3, b16[kk]);
-    }
-    EXPECT_EQ(4, WebRtcSpl_ZerosArrayW16(b16, 4));
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(0, b16[kk]);
-    }
-    EXPECT_EQ(4, WebRtcSpl_OnesArrayW16(b16, 4));
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(1, b16[kk]);
-    }
-    WebRtcSpl_MemSetW32(b32, 3, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(3, b32[kk]);
-    }
-    EXPECT_EQ(4, WebRtcSpl_ZerosArrayW32(b32, 4));
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(0, b32[kk]);
-    }
-    EXPECT_EQ(4, WebRtcSpl_OnesArrayW32(b32, 4));
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(1, b32[kk]);
-    }
-    for (int kk = 0; kk < 4; ++kk) {
-        bTmp8[kk] = (WebRtc_Word8)kk;
-        bTmp16[kk] = (WebRtc_Word16)kk;
-        bTmp32[kk] = (WebRtc_Word32)kk;
-    }
-    WEBRTC_SPL_MEMCPY_W8(b8, bTmp8, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(b8[kk], bTmp8[kk]);
-    }
-    WEBRTC_SPL_MEMCPY_W16(b16, bTmp16, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(b16[kk], bTmp16[kk]);
-    }
-//    WEBRTC_SPL_MEMCPY_W32(b32, bTmp32, 4);
-//    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(b32[kk], bTmp32[kk]);
-//    }
-    EXPECT_EQ(2, WebRtcSpl_CopyFromEndW16(b16, 4, 2, bTmp16));
-    for (int kk = 0; kk < 2; ++kk) {
-        EXPECT_EQ(kk+2, bTmp16[kk]);
-    }
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b32[kk] = B[kk];
-        b16[kk] = (WebRtc_Word16)B[kk];
-    }
-    WebRtcSpl_VectorBitShiftW32ToW16(bTmp16, 4, b32, 1);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
-    }
-    WebRtcSpl_VectorBitShiftW16(bTmp16, 4, b16, 1);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
-    }
-    WebRtcSpl_VectorBitShiftW32(bTmp32, 4, b32, 1);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((B[kk]>>1), bTmp32[kk]);
-    }
-
-    WebRtcSpl_MemCpyReversedOrder(&bTmp16[3], b16, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(b16[3-kk], bTmp16[kk]);
-    }
-
-}
-
-TEST_F(SplTest, MinMaxOperationsTest) {
-
-
-    int B[] = {4, 12, 133, -1100};
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* b32 = (WebRtc_Word32*) malloc(4);
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b16[kk] = B[kk];
-        b32[kk] = B[kk];
-    }
-
-    EXPECT_EQ(1100, WebRtcSpl_MaxAbsValueW16(b16, 4));
-    EXPECT_EQ(1100, WebRtcSpl_MaxAbsValueW32(b32, 4));
-    EXPECT_EQ(133, WebRtcSpl_MaxValueW16(b16, 4));
-    EXPECT_EQ(133, WebRtcSpl_MaxValueW32(b32, 4));
-    EXPECT_EQ(3, WebRtcSpl_MaxAbsIndexW16(b16, 4));
-    EXPECT_EQ(2, WebRtcSpl_MaxIndexW16(b16, 4));
-    EXPECT_EQ(2, WebRtcSpl_MaxIndexW32(b32, 4));
-
-    EXPECT_EQ(-1100, WebRtcSpl_MinValueW16(b16, 4));
-    EXPECT_EQ(-1100, WebRtcSpl_MinValueW32(b32, 4));
-    EXPECT_EQ(3, WebRtcSpl_MinIndexW16(b16, 4));
-    EXPECT_EQ(3, WebRtcSpl_MinIndexW32(b32, 4));
-
-    EXPECT_EQ(0, WebRtcSpl_GetScalingSquare(b16, 4, 1));
-
-}
-
-TEST_F(SplTest, VectorOperationsTest) {
-
-
-    int B[] = {4, 12, 133, 1100};
-    WebRtc_Word16* a16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* b32 = (WebRtc_Word32*) malloc(4);
-    WebRtc_Word16* bTmp16 = (WebRtc_Word16*) malloc(4);
-
-    for (int kk = 0; kk < 4; ++kk) {
-        a16[kk] = B[kk];
-        b16[kk] = B[kk];
-    }
-
-    WebRtcSpl_AffineTransformVector(bTmp16, b16, 3, 7, 2, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((B[kk]*3+7)>>2, bTmp16[kk]);
-    }
-    WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((B[kk]*3+B[kk]*2+2)>>2, bTmp16[kk]);
-    }
-
-    WebRtcSpl_AddAffineVectorToVector(bTmp16, b16, 3, 7, 2, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(((B[kk]*3+B[kk]*2+2)>>2)+((b16[kk]*3+7)>>2), bTmp16[kk]);
-    }
-
-    WebRtcSpl_CrossCorrelation(b32, b16, bTmp16, 4, 2, 2, 0);
-    for (int kk = 0; kk < 2; ++kk) {
-        EXPECT_EQ(614236, b32[kk]);
-    }
-//    EXPECT_EQ(, WebRtcSpl_DotProduct(b16, bTmp16, 4));
-    EXPECT_EQ(306962, WebRtcSpl_DotProductWithScale(b16, b16, 4, 2));
-
-    WebRtcSpl_ScaleVector(b16, bTmp16, 13, 4, 2);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
-    }
-    WebRtcSpl_ScaleVectorWithSat(b16, bTmp16, 13, 4, 2);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
-    }
-    WebRtcSpl_ScaleAndAddVectors(a16, 13, 2, b16, 7, 2, bTmp16, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(((a16[kk]*13)>>2)+((b16[kk]*7)>>2), bTmp16[kk]);
-    }
-
-    WebRtcSpl_AddVectorsAndShift(bTmp16, a16, b16, 4, 2);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(B[kk] >> 1, bTmp16[kk]);
-    }
-    WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3], 4, 2);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((a16[kk]*b16[3-kk])>>2, bTmp16[kk]);
-    }
-    WebRtcSpl_ElementwiseVectorMult(bTmp16, a16, b16, 4, 6);
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ((a16[kk]*b16[kk])>>6, bTmp16[kk]);
-    }
-
-    WebRtcSpl_SqrtOfOneMinusXSquared(b16, 4, bTmp16);
-    for (int kk = 0; kk < 3; ++kk) {
-        EXPECT_EQ(32767, bTmp16[kk]);
-    }
-    EXPECT_EQ(32749, bTmp16[3]);
-}
-
-TEST_F(SplTest, EstimatorsTest) {
-
-
-    int B[] = {4, 12, 133, 1100};
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* b32 = (WebRtc_Word32*) malloc(4);
-    WebRtc_Word16* bTmp16 = (WebRtc_Word16*) malloc(4);
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b16[kk] = B[kk];
-        b32[kk] = B[kk];
-    }
-
-    EXPECT_EQ(0, WebRtcSpl_LevinsonDurbin(b32, b16, bTmp16, 2));
-
-}
-
-TEST_F(SplTest, FilterTest) {
-
-
-    WebRtc_Word16 A[] = {1, 2, 33, 100};
-    WebRtc_Word16 A5[] = {1, 2, 33, 100, -5};
-    WebRtc_Word16 B[] = {4, 12, 133, 110};
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word16* bTmp16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word16* bTmp16Low = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word16* bState = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word16* bStateLow = (WebRtc_Word16*) malloc(4);
-
-    WebRtcSpl_ZerosArrayW16(bState, 4);
-    WebRtcSpl_ZerosArrayW16(bStateLow, 4);
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b16[kk] = A[kk];
-    }
-
-    // MA filters
-    WebRtcSpl_FilterMAFastQ12(b16, bTmp16, B, 4, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-        //EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
-    }
-    // AR filters
-    WebRtcSpl_FilterARFastQ12(b16, bTmp16, A, 4, 4);
-    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
-    }
-    EXPECT_EQ(4, WebRtcSpl_FilterAR(A5, 5, b16, 4, bState, 4, bStateLow, 4, bTmp16, bTmp16Low, 4));
-
-}
-
-TEST_F(SplTest, RandTest) {
-
-
-    WebRtc_Word16 BU[] = {3653, 12446, 8525, 30691};
-    WebRtc_Word16 BN[] = {3459, -11689, -258, -3738};
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_UWord32* bSeed = (WebRtc_UWord32*) malloc(1);
-
-    bSeed[0] = 100000;
-
-    EXPECT_EQ(464449057, WebRtcSpl_IncreaseSeed(bSeed));
-    EXPECT_EQ(31565, WebRtcSpl_RandU(bSeed));
-    EXPECT_EQ(-9786, WebRtcSpl_RandN(bSeed));
-    EXPECT_EQ(4, WebRtcSpl_RandUArray(b16, 4, bSeed));
-    for (int kk = 0; kk < 4; ++kk) {
-        EXPECT_EQ(BU[kk], b16[kk]);
-    }
-}
-
-TEST_F(SplTest, SignalProcessingTest) {
-
-
-    int A[] = {1, 2, 33, 100};
-    WebRtc_Word16* b16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* b32 = (WebRtc_Word32*) malloc(4);
-
-    WebRtc_Word16* bTmp16 = (WebRtc_Word16*) malloc(4);
-    WebRtc_Word32* bTmp32 = (WebRtc_Word32*) malloc(4);
-
-    int bScale = 0;
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b16[kk] = A[kk];
-        b32[kk] = A[kk];
-    }
-
-    EXPECT_EQ(2, WebRtcSpl_AutoCorrelation(b16, 4, 1, bTmp32, &bScale));
-    WebRtcSpl_ReflCoefToLpc(b16, 4, bTmp16);
-//    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
-//    }
-    WebRtcSpl_LpcToReflCoef(bTmp16, 4, b16);
-//    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(a16[kk], b16[kk]);
-//    }
-    WebRtcSpl_AutoCorrToReflCoef(b32, 4, bTmp16);
-//    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
-//    }
-    WebRtcSpl_GetHanningWindow(bTmp16, 4);
-//    for (int kk = 0; kk < 4; ++kk) {
-//        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
-//    }
-
-    for (int kk = 0; kk < 4; ++kk) {
-        b16[kk] = A[kk];
-    }
-    EXPECT_EQ(11094 , WebRtcSpl_Energy(b16, 4, &bScale));
-    EXPECT_EQ(0, bScale);
-}
-
-TEST_F(SplTest, FFTTest) {
-
-
-    WebRtc_Word16 B[] = {1, 2, 33, 100,
-            2, 3, 34, 101,
-            3, 4, 35, 102,
-            4, 5, 36, 103};
-
-    EXPECT_EQ(0, WebRtcSpl_ComplexFFT(B, 3, 1));
-//    for (int kk = 0; kk < 16; ++kk) {
-//        EXPECT_EQ(A[kk], B[kk]);
-//    }
-    EXPECT_EQ(0, WebRtcSpl_ComplexIFFT(B, 3, 1));
-//    for (int kk = 0; kk < 16; ++kk) {
-//        EXPECT_EQ(A[kk], B[kk]);
-//    }
-    WebRtcSpl_ComplexBitReverse(B, 3);
-    for (int kk = 0; kk < 16; ++kk) {
-        //EXPECT_EQ(A[kk], B[kk]);
-    }
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  SplEnvironment* env = new SplEnvironment;
-  ::testing::AddGlobalTestEnvironment(env);
-
-  return RUN_ALL_TESTS();
-}
diff --git a/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.h b/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.h
deleted file mode 100644
index d7babe7..0000000
--- a/src/common_audio/signal_processing_library/main/test/unit_test/unit_test.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * This header file contains the function WebRtcSpl_CopyFromBeginU8().
- * The description header can be found in signal_processing_library.h
- *
- */
-
-#ifndef WEBRTC_SPL_UNIT_TEST_H_
-#define WEBRTC_SPL_UNIT_TEST_H_
-
-#include <gtest/gtest.h>
-
-class SplTest: public ::testing::Test
-{
-protected:
-    SplTest();
-    virtual void SetUp();
-    virtual void TearDown();
-};
-
-#endif  // WEBRTC_SPL_UNIT_TEST_H_
diff --git a/src/common_audio/vad/Android.mk b/src/common_audio/vad/Android.mk
new file mode 100644
index 0000000..b7be3f0
--- /dev/null
+++ b/src/common_audio/vad/Android.mk
@@ -0,0 +1,50 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_vad
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := \
+    webrtc_vad.c \
+    vad_core.c \
+    vad_filterbank.c \
+    vad_gmm.c \
+    vad_sp.c
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/include \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../signal_processing/include 
+
+LOCAL_SHARED_LIBRARIES := \
+    libdl \
+    libstlport
+
+ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
+LOCAL_LDLIBS += -ldl -lpthread
+endif
+
+ifneq ($(TARGET_SIMULATOR),true)
+LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/common_audio/vad/OWNERS b/src/common_audio/vad/OWNERS
deleted file mode 100644
index 9132851..0000000
--- a/src/common_audio/vad/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-bjornv@google.com
-jks@google.com
diff --git a/src/common_audio/vad/main/interface/webrtc_vad.h b/src/common_audio/vad/include/webrtc_vad.h
similarity index 100%
rename from src/common_audio/vad/main/interface/webrtc_vad.h
rename to src/common_audio/vad/include/webrtc_vad.h
diff --git a/src/common_audio/vad/main/source/Android.mk b/src/common_audio/vad/main/source/Android.mk
deleted file mode 100644
index f52df93..0000000
--- a/src/common_audio/vad/main/source/Android.mk
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_vad
-LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := webrtc_vad.c \
-    vad_const.c \
-    vad_core.c \
-    vad_filterbank.c \
-    vad_gmm.c \
-    vad_sp.c
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' 
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
-LOCAL_LDLIBS += -ldl -lpthread
-endif
-
-ifneq ($(TARGET_SIMULATOR),true)
-LOCAL_SHARED_LIBRARIES += libdl
-endif
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/common_audio/vad/main/source/vad_const.c b/src/common_audio/vad/main/source/vad_const.c
deleted file mode 100644
index 47b6a4b..0000000
--- a/src/common_audio/vad/main/source/vad_const.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * This file includes the constant values used internally in VAD.
- */
-
-#include "vad_const.h"
-
-// Spectrum Weighting
-const WebRtc_Word16 kSpectrumWeight[6] = {6, 8, 10, 12, 14, 16};
-
-const WebRtc_Word16 kCompVar = 22005;
-
-// Constant 160*log10(2) in Q9
-const WebRtc_Word16 kLogConst = 24660;
-
-// Constant log2(exp(1)) in Q12
-const WebRtc_Word16 kLog10Const = 5909;
-
-// Q15
-const WebRtc_Word16 kNoiseUpdateConst = 655;
-const WebRtc_Word16 kSpeechUpdateConst = 6554;
-
-// Q8
-const WebRtc_Word16 kBackEta = 154;
-
-// Coefficients used by WebRtcVad_HpOutput, Q14
-const WebRtc_Word16 kHpZeroCoefs[3] = {6631, -13262, 6631};
-const WebRtc_Word16 kHpPoleCoefs[3] = {16384, -7756, 5620};
-
-// Allpass filter coefficients, upper and lower, in Q15
-// Upper: 0.64, Lower: 0.17
-const WebRtc_Word16 kAllPassCoefsQ15[2] = {20972, 5571};
-const WebRtc_Word16 kAllPassCoefsQ13[2] = {5243, 1392}; // Q13
-
-// Minimum difference between the two models, Q5
-const WebRtc_Word16 kMinimumDifference[6] = {544, 544, 576, 576, 576, 576};
-
-// Upper limit of mean value for speech model, Q7
-const WebRtc_Word16 kMaximumSpeech[6] = {11392, 11392, 11520, 11520, 11520, 11520};
-
-// Minimum value for mean value
-const WebRtc_Word16 kMinimumMean[2] = {640, 768};
-
-// Upper limit of mean value for noise model, Q7
-const WebRtc_Word16 kMaximumNoise[6] = {9216, 9088, 8960, 8832, 8704, 8576};
-
-// Adjustment for division with two in WebRtcVad_SplitFilter
-const WebRtc_Word16 kOffsetVector[6] = {368, 368, 272, 176, 176, 176};
-
-// Start values for the Gaussian models, Q7
-// Weights for the two Gaussians for the six channels (noise)
-const WebRtc_Word16 kNoiseDataWeights[12] = {34, 62, 72, 66, 53, 25, 94, 66, 56, 62, 75, 103};
-
-// Weights for the two Gaussians for the six channels (speech)
-const WebRtc_Word16 kSpeechDataWeights[12] = {48, 82, 45, 87, 50, 47, 80, 46, 83, 41, 78, 81};
-
-// Means for the two Gaussians for the six channels (noise)
-const WebRtc_Word16 kNoiseDataMeans[12] = {6738, 4892, 7065, 6715, 6771, 3369, 7646, 3863,
-        7820, 7266, 5020, 4362};
-
-// Means for the two Gaussians for the six channels (speech)
-const WebRtc_Word16 kSpeechDataMeans[12] = {8306, 10085, 10078, 11823, 11843, 6309, 9473,
-        9571, 10879, 7581, 8180, 7483};
-
-// Stds for the two Gaussians for the six channels (noise)
-const WebRtc_Word16 kNoiseDataStds[12] = {378, 1064, 493, 582, 688, 593, 474, 697, 475, 688,
-        421, 455};
-
-// Stds for the two Gaussians for the six channels (speech)
-const WebRtc_Word16 kSpeechDataStds[12] = {555, 505, 567, 524, 585, 1231, 509, 828, 492, 1540,
-        1079, 850};
diff --git a/src/common_audio/vad/main/source/vad_const.h b/src/common_audio/vad/main/source/vad_const.h
deleted file mode 100644
index 8980437..0000000
--- a/src/common_audio/vad/main/source/vad_const.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This header file includes the declarations of the internally used constants.
- */
-
-#ifndef WEBRTC_VAD_CONST_H_
-#define WEBRTC_VAD_CONST_H_
-
-#include "typedefs.h"
-
-// TODO(ajm): give these internal-linkage by moving to the appropriate file
-// where possible, and otherwise tag with WebRtcVad_.
-
-// Spectrum Weighting
-extern const WebRtc_Word16 kSpectrumWeight[];
-extern const WebRtc_Word16 kCompVar;
-// Logarithm constant
-extern const WebRtc_Word16 kLogConst;
-extern const WebRtc_Word16 kLog10Const;
-// Q15
-extern const WebRtc_Word16 kNoiseUpdateConst;
-extern const WebRtc_Word16 kSpeechUpdateConst;
-// Q8
-extern const WebRtc_Word16 kBackEta;
-// Coefficients used by WebRtcVad_HpOutput, Q14
-extern const WebRtc_Word16 kHpZeroCoefs[];
-extern const WebRtc_Word16 kHpPoleCoefs[];
-// Allpass filter coefficients, upper and lower, in Q15 resp. Q13
-extern const WebRtc_Word16 kAllPassCoefsQ15[];
-extern const WebRtc_Word16 kAllPassCoefsQ13[];
-// Minimum difference between the two models, Q5
-extern const WebRtc_Word16 kMinimumDifference[];
-// Maximum value when updating the speech model, Q7
-extern const WebRtc_Word16 kMaximumSpeech[];
-// Minimum value for mean value
-extern const WebRtc_Word16 kMinimumMean[];
-// Upper limit of mean value for noise model, Q7
-extern const WebRtc_Word16 kMaximumNoise[];
-// Adjustment for division with two in WebRtcVad_SplitFilter
-extern const WebRtc_Word16 kOffsetVector[];
-// Start values for the Gaussian models, Q7
-extern const WebRtc_Word16 kNoiseDataWeights[];
-extern const WebRtc_Word16 kSpeechDataWeights[];
-extern const WebRtc_Word16 kNoiseDataMeans[];
-extern const WebRtc_Word16 kSpeechDataMeans[];
-extern const WebRtc_Word16 kNoiseDataStds[];
-extern const WebRtc_Word16 kSpeechDataStds[];
-
-#endif // WEBRTC_VAD_CONST_H_
diff --git a/src/common_audio/vad/main/source/vad_filterbank.c b/src/common_audio/vad/main/source/vad_filterbank.c
deleted file mode 100644
index 11392c9..0000000
--- a/src/common_audio/vad/main/source/vad_filterbank.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file includes the implementation of the internal filterbank associated functions.
- * For function description, see vad_filterbank.h.
- */
-
-#include "vad_filterbank.h"
-#include "vad_defines.h"
-#include "vad_const.h"
-#include "signal_processing_library.h"
-
-void WebRtcVad_HpOutput(WebRtc_Word16 *in_vector,
-                        WebRtc_Word16 in_vector_length,
-                        WebRtc_Word16 *out_vector,
-                        WebRtc_Word16 *filter_state)
-{
-    WebRtc_Word16 i, *pi, *outPtr;
-    WebRtc_Word32 tmpW32;
-
-    pi = &in_vector[0];
-    outPtr = &out_vector[0];
-
-    // The sum of the absolute values of the impulse response:
-    // The zero/pole-filter has a max amplification of a single sample of: 1.4546
-    // Impulse response: 0.4047 -0.6179 -0.0266  0.1993  0.1035  -0.0194
-    // The all-zero section has a max amplification of a single sample of: 1.6189
-    // Impulse response: 0.4047 -0.8094  0.4047  0       0        0
-    // The all-pole section has a max amplification of a single sample of: 1.9931
-    // Impulse response: 1.0000  0.4734 -0.1189 -0.2187 -0.0627   0.04532
-
-    for (i = 0; i < in_vector_length; i++)
-    {
-        // all-zero section (filter coefficients in Q14)
-        tmpW32 = (WebRtc_Word32)WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[0], (*pi));
-        tmpW32 += (WebRtc_Word32)WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[1], filter_state[0]);
-        tmpW32 += (WebRtc_Word32)WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[2], filter_state[1]); // Q14
-        filter_state[1] = filter_state[0];
-        filter_state[0] = *pi++;
-
-        // all-pole section
-        tmpW32 -= (WebRtc_Word32)WEBRTC_SPL_MUL_16_16(kHpPoleCoefs[1], filter_state[2]); // Q14
-        tmpW32 -= (WebRtc_Word32)WEBRTC_SPL_MUL_16_16(kHpPoleCoefs[2], filter_state[3]);
-        filter_state[3] = filter_state[2];
-        filter_state[2] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32 (tmpW32, 14);
-        *outPtr++ = filter_state[2];
-    }
-}
-
-void WebRtcVad_Allpass(WebRtc_Word16 *in_vector,
-                       WebRtc_Word16 *out_vector,
-                       WebRtc_Word16 filter_coefficients,
-                       int vector_length,
-                       WebRtc_Word16 *filter_state)
-{
-    // The filter can only cause overflow (in the w16 output variable)
-    // if more than 4 consecutive input numbers are of maximum value and
-    // has the the same sign as the impulse responses first taps.
-    // First 6 taps of the impulse response: 0.6399 0.5905 -0.3779
-    // 0.2418 -0.1547 0.0990
-
-    int n;
-    WebRtc_Word16 tmp16;
-    WebRtc_Word32 tmp32, in32, state32;
-
-    state32 = WEBRTC_SPL_LSHIFT_W32(((WebRtc_Word32)(*filter_state)), 16); // Q31
-
-    for (n = 0; n < vector_length; n++)
-    {
-
-        tmp32 = state32 + WEBRTC_SPL_MUL_16_16(filter_coefficients, (*in_vector));
-        tmp16 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 16);
-        *out_vector++ = tmp16;
-        in32 = WEBRTC_SPL_LSHIFT_W32(((WebRtc_Word32)(*in_vector)), 14);
-        state32 = in32 - WEBRTC_SPL_MUL_16_16(filter_coefficients, tmp16);
-        state32 = WEBRTC_SPL_LSHIFT_W32(state32, 1);
-        in_vector += 2;
-    }
-
-    *filter_state = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(state32, 16);
-}
-
-void WebRtcVad_SplitFilter(WebRtc_Word16 *in_vector,
-                           WebRtc_Word16 *out_vector_hp,
-                           WebRtc_Word16 *out_vector_lp,
-                           WebRtc_Word16 *upper_state,
-                           WebRtc_Word16 *lower_state,
-                           int in_vector_length)
-{
-    WebRtc_Word16 tmpOut;
-    int k, halflen;
-
-    // Downsampling by 2 and get two branches
-    halflen = WEBRTC_SPL_RSHIFT_W16(in_vector_length, 1);
-
-    // All-pass filtering upper branch
-    WebRtcVad_Allpass(&in_vector[0], out_vector_hp, kAllPassCoefsQ15[0], halflen, upper_state);
-
-    // All-pass filtering lower branch
-    WebRtcVad_Allpass(&in_vector[1], out_vector_lp, kAllPassCoefsQ15[1], halflen, lower_state);
-
-    // Make LP and HP signals
-    for (k = 0; k < halflen; k++)
-    {
-        tmpOut = *out_vector_hp;
-        *out_vector_hp++ -= *out_vector_lp;
-        *out_vector_lp++ += tmpOut;
-    }
-}
-
-WebRtc_Word16 WebRtcVad_get_features(VadInstT *inst,
-                                     WebRtc_Word16 *in_vector,
-                                     int frame_size,
-                                     WebRtc_Word16 *out_vector)
-{
-    int curlen, filtno;
-    WebRtc_Word16 vecHP1[120], vecLP1[120];
-    WebRtc_Word16 vecHP2[60], vecLP2[60];
-    WebRtc_Word16 *ptin;
-    WebRtc_Word16 *hptout, *lptout;
-    WebRtc_Word16 power = 0;
-
-    // Split at 2000 Hz and downsample
-    filtno = 0;
-    ptin = in_vector;
-    hptout = vecHP1;
-    lptout = vecLP1;
-    curlen = frame_size;
-    WebRtcVad_SplitFilter(ptin, hptout, lptout, &inst->upper_state[filtno],
-                  &inst->lower_state[filtno], curlen);
-
-    // Split at 3000 Hz and downsample
-    filtno = 1;
-    ptin = vecHP1;
-    hptout = vecHP2;
-    lptout = vecLP2;
-    curlen = WEBRTC_SPL_RSHIFT_W16(frame_size, 1);
-
-    WebRtcVad_SplitFilter(ptin, hptout, lptout, &inst->upper_state[filtno],
-                  &inst->lower_state[filtno], curlen);
-
-    // Energy in 3000 Hz - 4000 Hz
-    curlen = WEBRTC_SPL_RSHIFT_W16(curlen, 1);
-    WebRtcVad_LogOfEnergy(vecHP2, &out_vector[5], &power, kOffsetVector[5], curlen);
-
-    // Energy in 2000 Hz - 3000 Hz
-    WebRtcVad_LogOfEnergy(vecLP2, &out_vector[4], &power, kOffsetVector[4], curlen);
-
-    // Split at 1000 Hz and downsample
-    filtno = 2;
-    ptin = vecLP1;
-    hptout = vecHP2;
-    lptout = vecLP2;
-    curlen = WEBRTC_SPL_RSHIFT_W16(frame_size, 1);
-    WebRtcVad_SplitFilter(ptin, hptout, lptout, &inst->upper_state[filtno],
-                  &inst->lower_state[filtno], curlen);
-
-    // Energy in 1000 Hz - 2000 Hz
-    curlen = WEBRTC_SPL_RSHIFT_W16(curlen, 1);
-    WebRtcVad_LogOfEnergy(vecHP2, &out_vector[3], &power, kOffsetVector[3], curlen);
-
-    // Split at 500 Hz
-    filtno = 3;
-    ptin = vecLP2;
-    hptout = vecHP1;
-    lptout = vecLP1;
-
-    WebRtcVad_SplitFilter(ptin, hptout, lptout, &inst->upper_state[filtno],
-                  &inst->lower_state[filtno], curlen);
-
-    // Energy in 500 Hz - 1000 Hz
-    curlen = WEBRTC_SPL_RSHIFT_W16(curlen, 1);
-    WebRtcVad_LogOfEnergy(vecHP1, &out_vector[2], &power, kOffsetVector[2], curlen);
-    // Split at 250 Hz
-    filtno = 4;
-    ptin = vecLP1;
-    hptout = vecHP2;
-    lptout = vecLP2;
-
-    WebRtcVad_SplitFilter(ptin, hptout, lptout, &inst->upper_state[filtno],
-                  &inst->lower_state[filtno], curlen);
-
-    // Energy in 250 Hz - 500 Hz
-    curlen = WEBRTC_SPL_RSHIFT_W16(curlen, 1);
-    WebRtcVad_LogOfEnergy(vecHP2, &out_vector[1], &power, kOffsetVector[1], curlen);
-
-    // Remove DC and LFs
-    WebRtcVad_HpOutput(vecLP2, curlen, vecHP1, inst->hp_filter_state);
-
-    // Power in 80 Hz - 250 Hz
-    WebRtcVad_LogOfEnergy(vecHP1, &out_vector[0], &power, kOffsetVector[0], curlen);
-
-    return power;
-}
-
-void WebRtcVad_LogOfEnergy(WebRtc_Word16 *vector,
-                           WebRtc_Word16 *enerlogval,
-                           WebRtc_Word16 *power,
-                           WebRtc_Word16 offset,
-                           int vector_length)
-{
-    WebRtc_Word16 enerSum = 0;
-    WebRtc_Word16 zeros, frac, log2;
-    WebRtc_Word32 energy;
-
-    int shfts = 0, shfts2;
-
-    energy = WebRtcSpl_Energy(vector, vector_length, &shfts);
-
-    if (energy > 0)
-    {
-
-        shfts2 = 16 - WebRtcSpl_NormW32(energy);
-        shfts += shfts2;
-        // "shfts" is the total number of right shifts that has been done to enerSum.
-        enerSum = (WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(energy, -shfts2);
-
-        // Find:
-        // 160*log10(enerSum*2^shfts) = 160*log10(2)*log2(enerSum*2^shfts) =
-        // 160*log10(2)*(log2(enerSum) + log2(2^shfts)) =
-        // 160*log10(2)*(log2(enerSum) + shfts)
-
-        zeros = WebRtcSpl_NormU32(enerSum);
-        frac = (WebRtc_Word16)(((WebRtc_UWord32)((WebRtc_Word32)(enerSum) << zeros)
-                & 0x7FFFFFFF) >> 21);
-        log2 = (WebRtc_Word16)(((31 - zeros) << 10) + frac);
-
-        *enerlogval = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(kLogConst, log2, 19)
-                + (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(shfts, kLogConst, 9);
-
-        if (*enerlogval < 0)
-        {
-            *enerlogval = 0;
-        }
-    } else
-    {
-        *enerlogval = 0;
-        shfts = -15;
-        enerSum = 0;
-    }
-
-    *enerlogval += offset;
-
-    // Total power in frame
-    if (*power <= MIN_ENERGY)
-    {
-        if (shfts > 0)
-        {
-            *power += MIN_ENERGY + 1;
-        } else if (WEBRTC_SPL_SHIFT_W16(enerSum, shfts) > MIN_ENERGY)
-        {
-            *power += MIN_ENERGY + 1;
-        } else
-        {
-            *power += WEBRTC_SPL_SHIFT_W16(enerSum, shfts);
-        }
-    }
-}
diff --git a/src/common_audio/vad/main/source/vad_gmm.c b/src/common_audio/vad/main/source/vad_gmm.c
deleted file mode 100644
index 23d12fb..0000000
--- a/src/common_audio/vad/main/source/vad_gmm.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file includes the implementation of the internal VAD call
- * WebRtcVad_GaussianProbability. For function description, see vad_gmm.h.
- */
-
-#include "vad_gmm.h"
-#include "signal_processing_library.h"
-#include "vad_const.h"
-
-WebRtc_Word32 WebRtcVad_GaussianProbability(WebRtc_Word16 in_sample,
-                                            WebRtc_Word16 mean,
-                                            WebRtc_Word16 std,
-                                            WebRtc_Word16 *delta)
-{
-    WebRtc_Word16 tmp16, tmpDiv, tmpDiv2, expVal, tmp16_1, tmp16_2;
-    WebRtc_Word32 tmp32, y32;
-
-    // Calculate tmpDiv=1/std, in Q10
-    tmp32 = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_W16(std,1) + (WebRtc_Word32)131072; // 1 in Q17
-    tmpDiv = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32, std); // Q17/Q7 = Q10
-
-    // Calculate tmpDiv2=1/std^2, in Q14
-    tmp16 = WEBRTC_SPL_RSHIFT_W16(tmpDiv, 2); // From Q10 to Q8
-    tmpDiv2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, tmp16, 2); // (Q8 * Q8)>>2 = Q14
-
-    tmp16 = WEBRTC_SPL_LSHIFT_W16(in_sample, 3); // Q7
-    tmp16 = tmp16 - mean; // Q7 - Q7 = Q7
-
-    // To be used later, when updating noise/speech model
-    // delta = (x-m)/std^2, in Q11
-    *delta = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmpDiv2, tmp16, 10); //(Q14*Q7)>>10 = Q11
-
-    // Calculate tmp32=(x-m)^2/(2*std^2), in Q10
-    tmp32 = (WebRtc_Word32)WEBRTC_SPL_MUL_16_16_RSFT(*delta, tmp16, 9); // One shift for /2
-
-    // Calculate expVal ~= exp(-(x-m)^2/(2*std^2)) ~= exp2(-log2(exp(1))*tmp32)
-    if (tmp32 < kCompVar)
-    {
-        // Calculate tmp16 = log2(exp(1))*tmp32 , in Q10
-        tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)tmp32,
-                                                         kLog10Const, 12);
-        tmp16 = -tmp16;
-        tmp16_2 = (WebRtc_Word16)(0x0400 | (tmp16 & 0x03FF));
-        tmp16_1 = (WebRtc_Word16)(tmp16 ^ 0xFFFF);
-        tmp16 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W16(tmp16_1, 10);
-        tmp16 += 1;
-        // Calculate expVal=log2(-tmp32), in Q10
-        expVal = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32((WebRtc_Word32)tmp16_2, tmp16);
-
-    } else
-    {
-        expVal = 0;
-    }
-
-    // Calculate y32=(1/std)*exp(-(x-m)^2/(2*std^2)), in Q20
-    y32 = WEBRTC_SPL_MUL_16_16(tmpDiv, expVal); // Q10 * Q10 = Q20
-
-    return y32; // Q20
-}
diff --git a/src/common_audio/vad/main/source/vad_gmm.h b/src/common_audio/vad/main/source/vad_gmm.h
deleted file mode 100644
index e0747fb..0000000
--- a/src/common_audio/vad/main/source/vad_gmm.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This header file includes the description of the internal VAD call
- * WebRtcVad_GaussianProbability.
- */
-
-#ifndef WEBRTC_VAD_GMM_H_
-#define WEBRTC_VAD_GMM_H_
-
-#include "typedefs.h"
-
-/****************************************************************************
- * WebRtcVad_GaussianProbability(...)
- *
- * This function calculates the probability for the value 'in_sample', given that in_sample
- * comes from a normal distribution with mean 'mean' and standard deviation 'std'.
- *
- * Input:
- *      - in_sample     : Input sample in Q4
- *      - mean          : mean value in the statistical model, Q7
- *      - std           : standard deviation, Q7
- *
- * Output:
- *
- *      - delta         : Value used when updating the model, Q11
- *
- * Return:
- *      - out           : out = 1/std * exp(-(x-m)^2/(2*std^2));
- *                        Probability for x.
- *
- */
-WebRtc_Word32 WebRtcVad_GaussianProbability(WebRtc_Word16 in_sample,
-                                            WebRtc_Word16 mean,
-                                            WebRtc_Word16 std,
-                                            WebRtc_Word16 *delta);
-
-#endif // WEBRTC_VAD_GMM_H_
diff --git a/src/common_audio/vad/main/source/vad_sp.c b/src/common_audio/vad/main/source/vad_sp.c
deleted file mode 100644
index f347ab5..0000000
--- a/src/common_audio/vad/main/source/vad_sp.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file includes the implementation of the VAD internal calls for Downsampling and
- * FindMinimum.
- * For function call descriptions; See vad_sp.h.
- */
-
-#include "vad_sp.h"
-#include "vad_defines.h"
-#include "vad_const.h"
-#include "signal_processing_library.h"
-
-// Downsampling filter based on the splitting filter and the allpass functions
-// in vad_filterbank.c
-void WebRtcVad_Downsampling(WebRtc_Word16* signal_in,
-                            WebRtc_Word16* signal_out,
-                            WebRtc_Word32* filter_state,
-                            int inlen)
-{
-    WebRtc_Word16 tmp16_1, tmp16_2;
-    WebRtc_Word32 tmp32_1, tmp32_2;
-    int n, halflen;
-
-    // Downsampling by 2 and get two branches
-    halflen = WEBRTC_SPL_RSHIFT_W16(inlen, 1);
-
-    tmp32_1 = filter_state[0];
-    tmp32_2 = filter_state[1];
-
-    // Filter coefficients in Q13, filter state in Q0
-    for (n = 0; n < halflen; n++)
-    {
-        // All-pass filtering upper branch
-        tmp16_1 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32_1, 1)
-                + (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((kAllPassCoefsQ13[0]),
-                                                           *signal_in, 14);
-        *signal_out = tmp16_1;
-        tmp32_1 = (WebRtc_Word32)(*signal_in++)
-                - (WebRtc_Word32)WEBRTC_SPL_MUL_16_16_RSFT((kAllPassCoefsQ13[0]), tmp16_1, 12);
-
-        // All-pass filtering lower branch
-        tmp16_2 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32_2, 1)
-                + (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((kAllPassCoefsQ13[1]),
-                                                           *signal_in, 14);
-        *signal_out++ += tmp16_2;
-        tmp32_2 = (WebRtc_Word32)(*signal_in++)
-                - (WebRtc_Word32)WEBRTC_SPL_MUL_16_16_RSFT((kAllPassCoefsQ13[1]), tmp16_2, 12);
-    }
-    filter_state[0] = tmp32_1;
-    filter_state[1] = tmp32_2;
-}
-
-WebRtc_Word16 WebRtcVad_FindMinimum(VadInstT* inst,
-                                    WebRtc_Word16 x,
-                                    int n)
-{
-    int i, j, k, II = -1, offset;
-    WebRtc_Word16 meanV, alpha;
-    WebRtc_Word32 tmp32, tmp32_1;
-    WebRtc_Word16 *valptr, *idxptr, *p1, *p2, *p3;
-
-    // Offset to beginning of the 16 minimum values in memory
-    offset = WEBRTC_SPL_LSHIFT_W16(n, 4);
-
-    // Pointer to memory for the 16 minimum values and the age of each value
-    idxptr = &inst->index_vector[offset];
-    valptr = &inst->low_value_vector[offset];
-
-    // Each value in low_value_vector is getting 1 loop older.
-    // Update age of each value in indexVal, and remove old values.
-    for (i = 0; i < 16; i++)
-    {
-        p3 = idxptr + i;
-        if (*p3 != 100)
-        {
-            *p3 += 1;
-        } else
-        {
-            p1 = valptr + i + 1;
-            p2 = p3 + 1;
-            for (j = i; j < 16; j++)
-            {
-                *(valptr + j) = *p1++;
-                *(idxptr + j) = *p2++;
-            }
-            *(idxptr + 15) = 101;
-            *(valptr + 15) = 10000;
-        }
-    }
-
-    // Check if x smaller than any of the values in low_value_vector.
-    // If so, find position.
-    if (x < *(valptr + 7))
-    {
-        if (x < *(valptr + 3))
-        {
-            if (x < *(valptr + 1))
-            {
-                if (x < *valptr)
-                {
-                    II = 0;
-                } else
-                {
-                    II = 1;
-                }
-            } else if (x < *(valptr + 2))
-            {
-                II = 2;
-            } else
-            {
-                II = 3;
-            }
-        } else if (x < *(valptr + 5))
-        {
-            if (x < *(valptr + 4))
-            {
-                II = 4;
-            } else
-            {
-                II = 5;
-            }
-        } else if (x < *(valptr + 6))
-        {
-            II = 6;
-        } else
-        {
-            II = 7;
-        }
-    } else if (x < *(valptr + 15))
-    {
-        if (x < *(valptr + 11))
-        {
-            if (x < *(valptr + 9))
-            {
-                if (x < *(valptr + 8))
-                {
-                    II = 8;
-                } else
-                {
-                    II = 9;
-                }
-            } else if (x < *(valptr + 10))
-            {
-                II = 10;
-            } else
-            {
-                II = 11;
-            }
-        } else if (x < *(valptr + 13))
-        {
-            if (x < *(valptr + 12))
-            {
-                II = 12;
-            } else
-            {
-                II = 13;
-            }
-        } else if (x < *(valptr + 14))
-        {
-            II = 14;
-        } else
-        {
-            II = 15;
-        }
-    }
-
-    // Put new min value on right position and shift bigger values up
-    if (II > -1)
-    {
-        for (i = 15; i > II; i--)
-        {
-            k = i - 1;
-            *(valptr + i) = *(valptr + k);
-            *(idxptr + i) = *(idxptr + k);
-        }
-        *(valptr + II) = x;
-        *(idxptr + II) = 1;
-    }
-
-    meanV = 0;
-    if ((inst->frame_counter) > 4)
-    {
-        j = 5;
-    } else
-    {
-        j = inst->frame_counter;
-    }
-
-    if (j > 2)
-    {
-        meanV = *(valptr + 2);
-    } else if (j > 0)
-    {
-        meanV = *valptr;
-    } else
-    {
-        meanV = 1600;
-    }
-
-    if (inst->frame_counter > 0)
-    {
-        if (meanV < inst->mean_value[n])
-        {
-            alpha = (WebRtc_Word16)ALPHA1; // 0.2 in Q15
-        } else
-        {
-            alpha = (WebRtc_Word16)ALPHA2; // 0.99 in Q15
-        }
-    } else
-    {
-        alpha = 0;
-    }
-
-    tmp32 = WEBRTC_SPL_MUL_16_16((alpha+1), inst->mean_value[n]);
-    tmp32_1 = WEBRTC_SPL_MUL_16_16(WEBRTC_SPL_WORD16_MAX - alpha, meanV);
-    tmp32 += tmp32_1;
-    tmp32 += 16384;
-    inst->mean_value[n] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32, 15);
-
-    return inst->mean_value[n];
-}
diff --git a/src/common_audio/vad/main/source/vad_sp.h b/src/common_audio/vad/main/source/vad_sp.h
deleted file mode 100644
index ae15c11..0000000
--- a/src/common_audio/vad/main/source/vad_sp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This header file includes the VAD internal calls for Downsampling and FindMinimum.
- * Specific function calls are given below.
- */
-
-#ifndef WEBRTC_VAD_SP_H_
-#define WEBRTC_VAD_SP_H_
-
-#include "vad_core.h"
-
-/****************************************************************************
- * WebRtcVad_Downsampling(...)
- *
- * Downsamples the signal a factor 2, eg. 32->16 or 16->8
- *
- * Input:
- *      - signal_in     : Input signal
- *      - in_length     : Length of input signal in samples
- *
- * Input & Output:
- *      - filter_state  : Filter state for first all-pass filters
- *
- * Output:
- *      - signal_out    : Downsampled signal (of length len/2)
- */
-void WebRtcVad_Downsampling(WebRtc_Word16* signal_in,
-                            WebRtc_Word16* signal_out,
-                            WebRtc_Word32* filter_state,
-                            int in_length);
-
-/****************************************************************************
- * WebRtcVad_FindMinimum(...)
- *
- * Find the five lowest values of x in 100 frames long window. Return a mean
- * value of these five values.
- *
- * Input:
- *      - feature_value : Feature value
- *      - channel       : Channel number
- *
- * Input & Output:
- *      - inst          : State information
- *
- * Output:
- *      return value    : Weighted minimum value for a moving window.
- */
-WebRtc_Word16 WebRtcVad_FindMinimum(VadInstT* inst, WebRtc_Word16 feature_value, int channel);
-
-#endif // WEBRTC_VAD_SP_H_
diff --git a/src/common_audio/vad/main/test/unit_test/unit_test.cc b/src/common_audio/vad/main/test/unit_test/unit_test.cc
deleted file mode 100644
index 8ac793e..0000000
--- a/src/common_audio/vad/main/test/unit_test/unit_test.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This file includes the implementation of the VAD unit tests.
- */
-
-#include <cstring>
-#include "unit_test.h"
-#include "webrtc_vad.h"
-
-
-class VadEnvironment : public ::testing::Environment {
- public:
-  virtual void SetUp() {
-  }
-
-  virtual void TearDown() {
-  }
-};
-
-VadTest::VadTest()
-{
-}
-
-void VadTest::SetUp() {
-}
-
-void VadTest::TearDown() {
-}
-
-TEST_F(VadTest, ApiTest) {
-    VadInst *vad_inst;
-    int i, j, k;
-    short zeros[960];
-    short speech[960];
-    char version[32];
-
-    // Valid test cases
-    int fs[3] = {8000, 16000, 32000};
-    int nMode[4] = {0, 1, 2, 3};
-    int framelen[3][3] = {{80, 160, 240},
-    {160, 320, 480}, {320, 640, 960}} ;
-    int vad_counter = 0;
-
-    memset(zeros, 0, sizeof(short) * 960);
-    memset(speech, 1, sizeof(short) * 960);
-    speech[13] = 1374;
-    speech[73] = -3747;
-
-
-
-    // WebRtcVad_get_version()
-    WebRtcVad_get_version(version);
-    //printf("API Test for %s\n", version);
-
-    // Null instance tests
-    EXPECT_EQ(-1, WebRtcVad_Create(NULL));
-    EXPECT_EQ(-1, WebRtcVad_Init(NULL));
-    EXPECT_EQ(-1, WebRtcVad_Assign(NULL, NULL));
-    EXPECT_EQ(-1, WebRtcVad_Free(NULL));
-    EXPECT_EQ(-1, WebRtcVad_set_mode(NULL, nMode[0]));
-    EXPECT_EQ(-1, WebRtcVad_Process(NULL, fs[0], speech,  framelen[0][0]));
-
-
-    EXPECT_EQ(WebRtcVad_Create(&vad_inst), 0);
-
-    // Not initialized tests
-    EXPECT_EQ(-1, WebRtcVad_Process(vad_inst, fs[0], speech,  framelen[0][0]));
-    EXPECT_EQ(-1, WebRtcVad_set_mode(vad_inst, nMode[0]));
-
-    // WebRtcVad_Init() tests
-    EXPECT_EQ(WebRtcVad_Init(vad_inst), 0);
-
-    // WebRtcVad_set_mode() tests
-    EXPECT_EQ(-1, WebRtcVad_set_mode(vad_inst, -1));
-    EXPECT_EQ(-1, WebRtcVad_set_mode(vad_inst, 4));
-
-    for (i = 0; i < sizeof(nMode)/sizeof(nMode[0]); i++) {
-        EXPECT_EQ(WebRtcVad_set_mode(vad_inst, nMode[i]), 0);
-    }
-
-    // WebRtcVad_Process() tests
-    EXPECT_EQ(-1, WebRtcVad_Process(vad_inst, fs[0], NULL,  framelen[0][0]));
-    EXPECT_EQ(-1, WebRtcVad_Process(vad_inst, 12000, speech,  framelen[0][0]));
-    EXPECT_EQ(-1, WebRtcVad_Process(vad_inst, fs[0], speech,  framelen[1][1]));
-    EXPECT_EQ(WebRtcVad_Process(vad_inst, fs[0], zeros,  framelen[0][0]), 0);
-    for (i = 0; i < sizeof(fs)/sizeof(fs[0]); i++) {
-        for (j = 0; j < sizeof(framelen[0])/sizeof(framelen[0][0]); j++) {
-            for (k = 0; k < sizeof(nMode)/sizeof(nMode[0]); k++) {
-                EXPECT_EQ(WebRtcVad_set_mode(vad_inst, nMode[k]), 0);
-//                printf("%d\n", WebRtcVad_Process(vad_inst, fs[i], speech,  framelen[i][j]));
-                if (vad_counter < 9)
-                {
-                    EXPECT_EQ(WebRtcVad_Process(vad_inst, fs[i], speech,  framelen[i][j]), 1);
-                } else
-                {
-                    EXPECT_EQ(WebRtcVad_Process(vad_inst, fs[i], speech,  framelen[i][j]), 0);
-                }
-                vad_counter++;
-            }
-        }
-    }
-
-    EXPECT_EQ(0, WebRtcVad_Free(vad_inst));
-
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  VadEnvironment* env = new VadEnvironment;
-  ::testing::AddGlobalTestEnvironment(env);
-
-  return RUN_ALL_TESTS();
-}
diff --git a/src/common_audio/vad/main/test/unit_test/unit_test.h b/src/common_audio/vad/main/test/unit_test/unit_test.h
deleted file mode 100644
index 62dac11..0000000
--- a/src/common_audio/vad/main/test/unit_test/unit_test.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/*
- * This header file includes the declaration of the VAD unit test.
- */
-
-#ifndef WEBRTC_VAD_UNIT_TEST_H_
-#define WEBRTC_VAD_UNIT_TEST_H_
-
-#include <gtest/gtest.h>
-
-class VadTest : public ::testing::Test {
- protected:
-  VadTest();
-  virtual void SetUp();
-  virtual void TearDown();
-};
-
-#endif  // WEBRTC_VAD_UNIT_TEST_H_
diff --git a/src/common_audio/vad/main/source/vad.gyp b/src/common_audio/vad/vad.gypi
similarity index 61%
rename from src/common_audio/vad/main/source/vad.gyp
rename to src/common_audio/vad/vad.gypi
index 754b684..4b12db0 100644
--- a/src/common_audio/vad/main/source/vad.gyp
+++ b/src/common_audio/vad/vad.gypi
@@ -7,32 +7,27 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../common_settings.gypi', # Common settings
-  ],
   'targets': [
     {
       'target_name': 'vad',
       'type': '<(library)',
       'dependencies': [
-        '../../../signal_processing_library/main/source/spl.gyp:spl',
+        'signal_processing',
       ],
       'include_dirs': [
-        '../interface',
+        'include',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'include',
         ],
       },
       'sources': [
-        '../interface/webrtc_vad.h',
+        'include/webrtc_vad.h',
         'webrtc_vad.c',
-        'vad_const.c',
-        'vad_const.h',
-        'vad_defines.h',
         'vad_core.c',
         'vad_core.h',
+        'vad_defines.h',
         'vad_filterbank.c',
         'vad_filterbank.h',
         'vad_gmm.c',
@@ -41,7 +36,25 @@
         'vad_sp.h',
       ],
     },
-  ],
+  ], # targets
+   'conditions': [
+    ['build_with_chromium==0', {
+      'targets' : [
+        {
+          'target_name': 'vad_unittests',
+          'type': 'executable',
+          'dependencies': [
+            'vad',
+            '<(webrtc_root)/../test/test.gyp:test_support_main',
+            '<(webrtc_root)/../testing/gtest.gyp:gtest',
+          ],
+          'sources': [
+            'vad_unittest.cc',
+          ],
+        }, # vad_unittests
+      ], # targets
+    }], # build_with_chromium
+  ], # conditions
 }
 
 # Local Variables:
diff --git a/src/common_audio/vad/main/source/vad_core.c b/src/common_audio/vad/vad_core.c
similarity index 92%
rename from src/common_audio/vad/main/source/vad_core.c
rename to src/common_audio/vad/vad_core.c
index e882999..e05c296 100644
--- a/src/common_audio/vad/main/source/vad_core.c
+++ b/src/common_audio/vad/vad_core.c
@@ -15,12 +15,50 @@
  */
 
 #include "vad_core.h"
-#include "vad_const.h"
+
+#include "signal_processing_library.h"
+#include "typedefs.h"
 #include "vad_defines.h"
 #include "vad_filterbank.h"
 #include "vad_gmm.h"
 #include "vad_sp.h"
-#include "signal_processing_library.h"
+
+// Spectrum Weighting
+static const WebRtc_Word16 kSpectrumWeight[6] = { 6, 8, 10, 12, 14, 16 };
+static const WebRtc_Word16 kNoiseUpdateConst = 655; // Q15
+static const WebRtc_Word16 kSpeechUpdateConst = 6554; // Q15
+static const WebRtc_Word16 kBackEta = 154; // Q8
+// Minimum difference between the two models, Q5
+static const WebRtc_Word16 kMinimumDifference[6] = {
+    544, 544, 576, 576, 576, 576 };
+// Upper limit of mean value for speech model, Q7
+static const WebRtc_Word16 kMaximumSpeech[6] = {
+    11392, 11392, 11520, 11520, 11520, 11520 };
+// Minimum value for mean value
+static const WebRtc_Word16 kMinimumMean[2] = { 640, 768 };
+// Upper limit of mean value for noise model, Q7
+static const WebRtc_Word16 kMaximumNoise[6] = {
+    9216, 9088, 8960, 8832, 8704, 8576 };
+// Start values for the Gaussian models, Q7
+// Weights for the two Gaussians for the six channels (noise)
+static const WebRtc_Word16 kNoiseDataWeights[12] = {
+    34, 62, 72, 66, 53, 25, 94, 66, 56, 62, 75, 103 };
+// Weights for the two Gaussians for the six channels (speech)
+static const WebRtc_Word16 kSpeechDataWeights[12] = {
+    48, 82, 45, 87, 50, 47, 80, 46, 83, 41, 78, 81 };
+// Means for the two Gaussians for the six channels (noise)
+static const WebRtc_Word16 kNoiseDataMeans[12] = {
+    6738, 4892, 7065, 6715, 6771, 3369, 7646, 3863, 7820, 7266, 5020, 4362 };
+// Means for the two Gaussians for the six channels (speech)
+static const WebRtc_Word16 kSpeechDataMeans[12] = {
+    8306, 10085, 10078, 11823, 11843, 6309, 9473, 9571, 10879, 7581, 8180, 7483
+};
+// Stds for the two Gaussians for the six channels (noise)
+static const WebRtc_Word16 kNoiseDataStds[12] = {
+    378, 1064, 493, 582, 688, 593, 474, 697, 475, 688, 421, 455 };
+// Stds for the two Gaussians for the six channels (speech)
+static const WebRtc_Word16 kSpeechDataStds[12] = {
+    555, 505, 567, 524, 585, 1231, 509, 828, 492, 1540, 1079, 850 };
 
 static const int kInitCheck = 42;
 
@@ -651,10 +689,10 @@
                 *nmean2ptr -= tmp16_2;
             }
 
-            *nmean1ptr++;
-            *smean1ptr++;
-            *nstd1ptr++;
-            *sstd1ptr++;
+            nmean1ptr++;
+            smean1ptr++;
+            nstd1ptr++;
+            sstd1ptr++;
         }
         inst->frame_counter++;
     } else
diff --git a/src/common_audio/vad/main/source/vad_core.h b/src/common_audio/vad/vad_core.h
similarity index 96%
rename from src/common_audio/vad/main/source/vad_core.h
rename to src/common_audio/vad/vad_core.h
index 544caf5a..cad6ca4 100644
--- a/src/common_audio/vad/main/source/vad_core.h
+++ b/src/common_audio/vad/vad_core.h
@@ -28,11 +28,14 @@
     WebRtc_Word16 speech_means[NUM_TABLE_VALUES];
     WebRtc_Word16 noise_stds[NUM_TABLE_VALUES];
     WebRtc_Word16 speech_stds[NUM_TABLE_VALUES];
+    // TODO(bjornv): Change to |frame_count|.
     WebRtc_Word32 frame_counter;
     WebRtc_Word16 over_hang; // Over Hang
     WebRtc_Word16 num_of_speech;
+    // TODO(bjornv): Change to |age_vector|.
     WebRtc_Word16 index_vector[16 * NUM_CHANNELS];
     WebRtc_Word16 low_value_vector[16 * NUM_CHANNELS];
+    // TODO(bjornv): Change to |median|.
     WebRtc_Word16 mean_value[NUM_CHANNELS];
     WebRtc_Word16 upper_state[5];
     WebRtc_Word16 lower_state[5];
diff --git a/src/common_audio/vad/main/source/vad_defines.h b/src/common_audio/vad/vad_defines.h
similarity index 100%
rename from src/common_audio/vad/main/source/vad_defines.h
rename to src/common_audio/vad/vad_defines.h
diff --git a/src/common_audio/vad/vad_filterbank.c b/src/common_audio/vad/vad_filterbank.c
new file mode 100644
index 0000000..63eef5b
--- /dev/null
+++ b/src/common_audio/vad/vad_filterbank.c
@@ -0,0 +1,278 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file includes the implementation of the internal filterbank associated functions.
+ * For function description, see vad_filterbank.h.
+ */
+
+#include "vad_filterbank.h"
+
+#include "signal_processing_library.h"
+#include "typedefs.h"
+#include "vad_defines.h"
+
+// Constant 160*log10(2) in Q9
+static const int16_t kLogConst = 24660;
+
+// Coefficients used by WebRtcVad_HpOutput, Q14
+static const int16_t kHpZeroCoefs[3] = { 6631, -13262, 6631 };
+static const int16_t kHpPoleCoefs[3] = { 16384, -7756, 5620 };
+
+// Allpass filter coefficients, upper and lower, in Q15
+// Upper: 0.64, Lower: 0.17
+static const int16_t kAllPassCoefsQ15[2] = { 20972, 5571 };
+
+// Adjustment for division by two in WebRtcVad_SplitFilter
+static const int16_t kOffsetVector[6] = { 368, 368, 272, 176, 176, 176 };
+
+void WebRtcVad_HpOutput(int16_t* in_vector,
+                        int in_vector_length,
+                        int16_t* filter_state,
+                        int16_t* out_vector) {
+  int i;
+  int16_t* in_ptr = in_vector;
+  int16_t* out_ptr = out_vector;
+  int32_t tmp32 = 0;
+
+
+  // The sum of the absolute values of the impulse response:
+  // The zero/pole-filter has a max amplification of a single sample of: 1.4546
+  // Impulse response: 0.4047 -0.6179 -0.0266  0.1993  0.1035  -0.0194
+  // The all-zero section has a max amplification of a single sample of: 1.6189
+  // Impulse response: 0.4047 -0.8094  0.4047  0       0        0
+  // The all-pole section has a max amplification of a single sample of: 1.9931
+  // Impulse response: 1.0000  0.4734 -0.1189 -0.2187 -0.0627   0.04532
+
+  for (i = 0; i < in_vector_length; i++) {
+    // all-zero section (filter coefficients in Q14)
+    tmp32 = (int32_t) WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[0], (*in_ptr));
+    tmp32 += (int32_t) WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[1], filter_state[0]);
+    tmp32 += (int32_t) WEBRTC_SPL_MUL_16_16(kHpZeroCoefs[2],
+                                            filter_state[1]);  // Q14
+    filter_state[1] = filter_state[0];
+    filter_state[0] = *in_ptr++;
+
+    // all-pole section
+    tmp32 -= (int32_t) WEBRTC_SPL_MUL_16_16(kHpPoleCoefs[1],
+                                            filter_state[2]);  // Q14
+    tmp32 -= (int32_t) WEBRTC_SPL_MUL_16_16(kHpPoleCoefs[2], filter_state[3]);
+    filter_state[3] = filter_state[2];
+    filter_state[2] = (int16_t) WEBRTC_SPL_RSHIFT_W32 (tmp32, 14);
+    *out_ptr++ = filter_state[2];
+  }
+}
+
+void WebRtcVad_Allpass(int16_t* in_vector,
+                       int16_t filter_coefficients,
+                       int vector_length,
+                       int16_t* filter_state,
+                       int16_t* out_vector) {
+  // The filter can only cause overflow (in the w16 output variable)
+  // if more than 4 consecutive input numbers are of maximum value and
+  // have the same sign as the impulse response's first taps.
+  // First 6 taps of the impulse response: 0.6399 0.5905 -0.3779
+  // 0.2418 -0.1547 0.0990
+
+  int i;
+  int16_t tmp16 = 0;
+  int32_t tmp32 = 0, in32 = 0;
+  int32_t state32 = WEBRTC_SPL_LSHIFT_W32((int32_t) (*filter_state), 16); // Q31
+
+  for (i = 0; i < vector_length; i++) {
+    tmp32 = state32 + WEBRTC_SPL_MUL_16_16(filter_coefficients, (*in_vector));
+    tmp16 = (int16_t) WEBRTC_SPL_RSHIFT_W32(tmp32, 16);
+    *out_vector++ = tmp16;
+    in32 = WEBRTC_SPL_LSHIFT_W32(((int32_t) (*in_vector)), 14);
+    state32 = in32 - WEBRTC_SPL_MUL_16_16(filter_coefficients, tmp16);
+    state32 = WEBRTC_SPL_LSHIFT_W32(state32, 1);
+    in_vector += 2;
+  }
+
+  *filter_state = (int16_t) WEBRTC_SPL_RSHIFT_W32(state32, 16);
+}
+
+void WebRtcVad_SplitFilter(int16_t* in_vector,
+                           int in_vector_length,
+                           int16_t* upper_state,
+                           int16_t* lower_state,
+                           int16_t* out_vector_hp,
+                           int16_t* out_vector_lp) {
+  int16_t tmp_out;
+  int i;
+  int half_length = WEBRTC_SPL_RSHIFT_W16(in_vector_length, 1);
+
+  // All-pass filtering upper branch
+  WebRtcVad_Allpass(&in_vector[0], kAllPassCoefsQ15[0], half_length,
+                    upper_state, out_vector_hp);
+
+  // All-pass filtering lower branch
+  WebRtcVad_Allpass(&in_vector[1], kAllPassCoefsQ15[1], half_length,
+                    lower_state, out_vector_lp);
+
+  // Make LP and HP signals
+  for (i = 0; i < half_length; i++) {
+    tmp_out = *out_vector_hp;
+    *out_vector_hp++ -= *out_vector_lp;
+    *out_vector_lp++ += tmp_out;
+  }
+}
+
+int16_t WebRtcVad_get_features(VadInstT* inst,
+                               int16_t* in_vector,
+                               int frame_size,
+                               int16_t* out_vector) {
+  int16_t power = 0;
+  // We expect |frame_size| to be 80, 160 or 240 samples, which corresponds to
+  // 10, 20 or 30 ms in 8 kHz. Therefore, the intermediate downsampled data will
+  // have at most 120 samples after the first split and at most 60 samples after
+  // the second split.
+  int16_t hp_120[120], lp_120[120];
+  int16_t hp_60[60], lp_60[60];
+  // Initialize variables for the first SplitFilter().
+  int length = frame_size;
+  int frequency_band = 0;
+  int16_t* in_ptr = in_vector;
+  int16_t* hp_out_ptr = hp_120;
+  int16_t* lp_out_ptr = lp_120;
+
+  // Split at 2000 Hz and downsample
+  WebRtcVad_SplitFilter(in_ptr, length, &inst->upper_state[frequency_band],
+                        &inst->lower_state[frequency_band], hp_out_ptr,
+                        lp_out_ptr);
+
+  // Split at 3000 Hz and downsample
+  frequency_band = 1;
+  in_ptr = hp_120;
+  hp_out_ptr = hp_60;
+  lp_out_ptr = lp_60;
+  length = WEBRTC_SPL_RSHIFT_W16(frame_size, 1);
+
+  WebRtcVad_SplitFilter(in_ptr, length, &inst->upper_state[frequency_band],
+                        &inst->lower_state[frequency_band], hp_out_ptr,
+                        lp_out_ptr);
+
+  // Energy in 3000 Hz - 4000 Hz
+  length = WEBRTC_SPL_RSHIFT_W16(length, 1);
+  WebRtcVad_LogOfEnergy(hp_60, length, kOffsetVector[5], &power,
+                        &out_vector[5]);
+
+  // Energy in 2000 Hz - 3000 Hz
+  WebRtcVad_LogOfEnergy(lp_60, length, kOffsetVector[4], &power,
+                        &out_vector[4]);
+
+  // Split at 1000 Hz and downsample
+  frequency_band = 2;
+  in_ptr = lp_120;
+  hp_out_ptr = hp_60;
+  lp_out_ptr = lp_60;
+  length = WEBRTC_SPL_RSHIFT_W16(frame_size, 1);
+  WebRtcVad_SplitFilter(in_ptr, length, &inst->upper_state[frequency_band],
+                        &inst->lower_state[frequency_band], hp_out_ptr,
+                        lp_out_ptr);
+
+  // Energy in 1000 Hz - 2000 Hz
+  length = WEBRTC_SPL_RSHIFT_W16(length, 1);
+  WebRtcVad_LogOfEnergy(hp_60, length, kOffsetVector[3], &power,
+                        &out_vector[3]);
+
+  // Split at 500 Hz
+  frequency_band = 3;
+  in_ptr = lp_60;
+  hp_out_ptr = hp_120;
+  lp_out_ptr = lp_120;
+
+  WebRtcVad_SplitFilter(in_ptr, length, &inst->upper_state[frequency_band],
+                        &inst->lower_state[frequency_band], hp_out_ptr,
+                        lp_out_ptr);
+
+  // Energy in 500 Hz - 1000 Hz
+  length = WEBRTC_SPL_RSHIFT_W16(length, 1);
+  WebRtcVad_LogOfEnergy(hp_120, length, kOffsetVector[2], &power,
+                        &out_vector[2]);
+
+  // Split at 250 Hz
+  frequency_band = 4;
+  in_ptr = lp_120;
+  hp_out_ptr = hp_60;
+  lp_out_ptr = lp_60;
+
+  WebRtcVad_SplitFilter(in_ptr, length, &inst->upper_state[frequency_band],
+                        &inst->lower_state[frequency_band], hp_out_ptr,
+                        lp_out_ptr);
+
+  // Energy in 250 Hz - 500 Hz
+  length = WEBRTC_SPL_RSHIFT_W16(length, 1);
+  WebRtcVad_LogOfEnergy(hp_60, length, kOffsetVector[1], &power,
+                        &out_vector[1]);
+
+  // Remove DC and LFs
+  WebRtcVad_HpOutput(lp_60, length, inst->hp_filter_state, hp_120);
+
+  // Power in 80 Hz - 250 Hz
+  WebRtcVad_LogOfEnergy(hp_120, length, kOffsetVector[0], &power,
+                        &out_vector[0]);
+
+  return power;
+}
+
+void WebRtcVad_LogOfEnergy(int16_t* vector,
+                           int vector_length,
+                           int16_t offset,
+                           int16_t* power,
+                           int16_t* log_energy) {
+  int shfts = 0, shfts2 = 0;
+  int16_t energy_s16 = 0;
+  int16_t zeros = 0, frac = 0, log2 = 0;
+  int32_t energy = WebRtcSpl_Energy(vector, vector_length, &shfts);
+
+  if (energy > 0) {
+
+    shfts2 = 16 - WebRtcSpl_NormW32(energy);
+    shfts += shfts2;
+    // "shfts" is the total number of right shifts that has been done to
+    // energy_s16.
+    energy_s16 = (int16_t) WEBRTC_SPL_SHIFT_W32(energy, -shfts2);
+
+    // Find:
+    // 160*log10(energy_s16*2^shfts) = 160*log10(2)*log2(energy_s16*2^shfts) =
+    // 160*log10(2)*(log2(energy_s16) + log2(2^shfts)) =
+    // 160*log10(2)*(log2(energy_s16) + shfts)
+
+    zeros = WebRtcSpl_NormU32(energy_s16);
+    frac = (int16_t) (((uint32_t) ((int32_t) (energy_s16) << zeros)
+        & 0x7FFFFFFF) >> 21);
+    log2 = (int16_t) (((31 - zeros) << 10) + frac);
+
+    *log_energy = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(kLogConst, log2, 19)
+        + (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(shfts, kLogConst, 9);
+
+    if (*log_energy < 0) {
+      *log_energy = 0;
+    }
+  } else {
+    *log_energy = 0;
+    shfts = -15;
+    energy_s16 = 0;
+  }
+
+  *log_energy += offset;
+
+  // Total power in frame
+  if (*power <= MIN_ENERGY) {
+    if (shfts > 0) {
+      *power += MIN_ENERGY + 1;
+    } else if (WEBRTC_SPL_SHIFT_W16(energy_s16, shfts) > MIN_ENERGY) {
+      *power += MIN_ENERGY + 1;
+    } else {
+      *power += WEBRTC_SPL_SHIFT_W16(energy_s16, shfts);
+    }
+  }
+}
diff --git a/src/common_audio/vad/main/source/vad_filterbank.h b/src/common_audio/vad/vad_filterbank.h
similarity index 73%
rename from src/common_audio/vad/main/source/vad_filterbank.h
rename to src/common_audio/vad/vad_filterbank.h
index a5507ea..1285c47 100644
--- a/src/common_audio/vad/main/source/vad_filterbank.h
+++ b/src/common_audio/vad/vad_filterbank.h
@@ -8,17 +8,18 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 /*
  * This header file includes the description of the internal VAD call
  * WebRtcVad_GaussianProbability.
  */
 
-#ifndef WEBRTC_VAD_FILTERBANK_H_
-#define WEBRTC_VAD_FILTERBANK_H_
+#ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
+#define WEBRTC_COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
 
+#include "typedefs.h"
 #include "vad_core.h"
 
+// TODO(bjornv): Move local functions to vad_filterbank.c and make static.
 /****************************************************************************
  * WebRtcVad_HpOutput(...)
  *
@@ -34,10 +35,10 @@
  *      - filter_state      : Updated state of the filter
  *
  */
-void WebRtcVad_HpOutput(WebRtc_Word16* in_vector,
-                        WebRtc_Word16  in_vector_length,
-                        WebRtc_Word16* out_vector,
-                        WebRtc_Word16* filter_state);
+void WebRtcVad_HpOutput(int16_t* in_vector,
+                        int in_vector_length,
+                        int16_t* filter_state,
+                        int16_t* out_vector);
 
 /****************************************************************************
  * WebRtcVad_Allpass(...)
@@ -58,11 +59,11 @@
  *      - filter_state          : Updated state of the filter (Q(-1))
  *
  */
-void WebRtcVad_Allpass(WebRtc_Word16* in_vector,
-                       WebRtc_Word16* outw16,
-                       WebRtc_Word16 filter_coefficients,
+void WebRtcVad_Allpass(int16_t* in_vector,
+                       int16_t filter_coefficients,
                        int vector_length,
-                       WebRtc_Word16* filter_state);
+                       int16_t* filter_state,
+                       int16_t* outw16);
 
 /****************************************************************************
  * WebRtcVad_SplitFilter(...)
@@ -83,12 +84,12 @@
  *      - lower_state       : Updated state of the lower filter
  *
  */
-void WebRtcVad_SplitFilter(WebRtc_Word16* in_vector,
-                           WebRtc_Word16* out_vector_hp,
-                           WebRtc_Word16* out_vector_lp,
-                           WebRtc_Word16* upper_state,
-                           WebRtc_Word16* lower_state,
-                           int in_vector_length);
+void WebRtcVad_SplitFilter(int16_t* in_vector,
+                           int in_vector_length,
+                           int16_t* upper_state,
+                           int16_t* lower_state,
+                           int16_t* out_vector_hp,
+                           int16_t* out_vector_lp);
 
 /****************************************************************************
  * WebRtcVad_get_features(...)
@@ -113,10 +114,10 @@
  * Return: total power in the signal (NOTE! This value is not exact since it
  *         is only used in a comparison.
  */
-WebRtc_Word16 WebRtcVad_get_features(VadInstT* inst,
-                                     WebRtc_Word16* in_vector,
-                                     int frame_size,
-                                     WebRtc_Word16* out_vector);
+int16_t WebRtcVad_get_features(VadInstT* inst,
+                               int16_t* in_vector,
+                               int frame_size,
+                               int16_t* out_vector);
 
 /****************************************************************************
  * WebRtcVad_LogOfEnergy(...)
@@ -129,15 +130,15 @@
  *      - vector_length     : Length of input vector
  *
  * Output:
- *      - enerlogval        : 10*log10(energy);
+ *      - log_energy        : 10*log10(energy);
  *      - power             : Update total power in speech frame. NOTE! This value
  *                            is not exact since it is only used in a comparison.
  *     
  */
-void WebRtcVad_LogOfEnergy(WebRtc_Word16* vector,
-                           WebRtc_Word16* enerlogval,
-                           WebRtc_Word16* power,
-                           WebRtc_Word16 offset,
-                           int vector_length);
+void WebRtcVad_LogOfEnergy(int16_t* vector,
+                           int vector_length,
+                           int16_t offset,
+                           int16_t* power,
+                           int16_t* log_energy);
 
-#endif // WEBRTC_VAD_FILTERBANK_H_
+#endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
diff --git a/src/common_audio/vad/vad_gmm.c b/src/common_audio/vad/vad_gmm.c
new file mode 100644
index 0000000..20a703a
--- /dev/null
+++ b/src/common_audio/vad/vad_gmm.c
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vad_gmm.h"
+
+#include "signal_processing_library.h"
+#include "typedefs.h"
+
+static const int32_t kCompVar = 22005;
+static const int16_t kLog2Exp = 5909;  // log2(exp(1)) in Q12.
+
+// For a normal distribution, the probability of |input| is calculated and
+// returned (in Q20). The formula for normal distributed probability is
+//
+// 1 / s * exp(-(x - m)^2 / (2 * s^2))
+//
+// where the parameters are given in the following Q domains:
+// m = |mean| (Q7)
+// s = |std| (Q7)
+// x = |input| (Q4)
+// In addition to the probability, we output |delta| (in Q11) used when updating
+// the noise/speech model.
+int32_t WebRtcVad_GaussianProbability(int16_t input,
+                                      int16_t mean,
+                                      int16_t std,
+                                      int16_t* delta) {
+  int16_t tmp16, inv_std, inv_std2, exp_value = 0;
+  int32_t tmp32;
+
+  // Calculate |inv_std| = 1 / s, in Q10.
+  // 131072 = 1 in Q17, and (|std| >> 1) is for rounding instead of truncation.
+  // Q-domain: Q17 / Q7 = Q10.
+  tmp32 = (int32_t) 131072 + (int32_t) (std >> 1);
+  inv_std = (int16_t) WebRtcSpl_DivW32W16(tmp32, std);
+
+  // Calculate |inv_std2| = 1 / s^2, in Q14.
+  tmp16 = (inv_std >> 2);  // Q10 -> Q8.
+  // Q-domain: (Q8 * Q8) >> 2 = Q14.
+  inv_std2 = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(tmp16, tmp16, 2);
+  // TODO(bjornv): Investigate if changing to
+  // |inv_std2| = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(|inv_std|, |inv_std|, 6);
+  // gives better accuracy.
+
+  tmp16 = (input << 3);  // Q4 -> Q7
+  tmp16 = tmp16 - mean;  // Q7 - Q7 = Q7
+
+  // To be used later, when updating noise/speech model.
+  // |delta| = (x - m) / s^2, in Q11.
+  // Q-domain: (Q14 * Q7) >> 10 = Q11.
+  *delta = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(inv_std2, tmp16, 10);
+
+  // Calculate the exponent |tmp32| = (x - m)^2 / (2 * s^2), in Q10. Replacing
+  // division by two with one shift.
+  // Q-domain: (Q11 * Q7) >> 8 = Q10.
+  tmp32 = WEBRTC_SPL_MUL_16_16_RSFT(*delta, tmp16, 9);
+
+  // If the exponent is small enough to give a non-zero probability we calculate
+  // |exp_value| ~= exp(-(x - m)^2 / (2 * s^2))
+  //             ~= exp2(-log2(exp(1)) * |tmp32|).
+  if (tmp32 < kCompVar) {
+    // Calculate |tmp16| = log2(exp(1)) * |tmp32|, in Q10.
+    // Q-domain: (Q12 * Q10) >> 12 = Q10.
+    tmp16 = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(kLog2Exp, (int16_t) tmp32, 12);
+    tmp16 = -tmp16;
+    exp_value = (0x0400 | (tmp16 & 0x03FF));
+    tmp16 ^= 0xFFFF;
+    tmp16 >>= 10;
+    tmp16 += 1;
+    // Get |exp_value| = exp(-|tmp32|) in Q10.
+    exp_value >>= tmp16;
+  }
+
+  // Calculate and return (1 / s) * exp(-(x - m)^2 / (2 * s^2)), in Q20.
+  // Q-domain: Q10 * Q10 = Q20.
+  return WEBRTC_SPL_MUL_16_16(inv_std, exp_value);
+}
diff --git a/src/common_audio/vad/vad_gmm.h b/src/common_audio/vad/vad_gmm.h
new file mode 100644
index 0000000..2333af7
--- /dev/null
+++ b/src/common_audio/vad/vad_gmm.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Gaussian probability calculations internally used in vad_core.c.
+
+#ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_GMM_H_
+#define WEBRTC_COMMON_AUDIO_VAD_VAD_GMM_H_
+
+#include "typedefs.h"
+
+// Calculates the probability for |input|, given that |input| comes from a
+// normal distribution with mean and standard deviation (|mean|, |std|).
+//
+// Inputs:
+//      - input         : input sample in Q4.
+//      - mean          : mean input in the statistical model, Q7.
+//      - std           : standard deviation, Q7.
+//
+// Output:
+//
+//      - delta         : input used when updating the model, Q11.
+//                        |delta| = (|input| - |mean|) / |std|^2.
+//
+// Return:
+//   (probability for |input|) =
+//    1 / |std| * exp(-(|input| - |mean|)^2 / (2 * |std|^2));
+int32_t WebRtcVad_GaussianProbability(int16_t input,
+                                      int16_t mean,
+                                      int16_t std,
+                                      int16_t* delta);
+
+#endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_GMM_H_
diff --git a/src/common_audio/vad/vad_sp.c b/src/common_audio/vad/vad_sp.c
new file mode 100644
index 0000000..4fface3
--- /dev/null
+++ b/src/common_audio/vad/vad_sp.c
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vad_sp.h"
+
+#include <assert.h>
+
+#include "signal_processing_library.h"
+#include "typedefs.h"
+#include "vad_defines.h"
+
+// Allpass filter coefficients, upper and lower, in Q13.
+// Upper: 0.64, Lower: 0.17.
+static const int16_t kAllPassCoefsQ13[2] = { 5243, 1392 };  // Q13
+
+// TODO(bjornv): Move this function to vad_filterbank.c.
+// Downsampling filter based on splitting filter and allpass functions.
+void WebRtcVad_Downsampling(int16_t* signal_in,
+                            int16_t* signal_out,
+                            int32_t* filter_state,
+                            int in_length) {
+  int16_t tmp16_1 = 0, tmp16_2 = 0;
+  int32_t tmp32_1 = filter_state[0];
+  int32_t tmp32_2 = filter_state[1];
+  int n = 0;
+  int half_length = (in_length >> 1);  // Downsampling by 2 gives half length.
+
+  // Filter coefficients in Q13, filter state in Q0.
+  for (n = 0; n < half_length; n++) {
+    // All-pass filtering upper branch.
+    tmp16_1 = (int16_t) ((tmp32_1 >> 1) +
+        WEBRTC_SPL_MUL_16_16_RSFT(kAllPassCoefsQ13[0], *signal_in, 14));
+    *signal_out = tmp16_1;
+    tmp32_1 = (int32_t) (*signal_in++) -
+        WEBRTC_SPL_MUL_16_16_RSFT(kAllPassCoefsQ13[0], tmp16_1, 12);
+
+    // All-pass filtering lower branch.
+    tmp16_2 = (int16_t) ((tmp32_2 >> 1) +
+        WEBRTC_SPL_MUL_16_16_RSFT(kAllPassCoefsQ13[1], *signal_in, 14));
+    *signal_out++ += tmp16_2;
+    tmp32_2 = (int32_t) (*signal_in++) -
+        WEBRTC_SPL_MUL_16_16_RSFT(kAllPassCoefsQ13[1], tmp16_2, 12);
+  }
+  // Store the filter states.
+  filter_state[0] = tmp32_1;
+  filter_state[1] = tmp32_2;
+}
+
+// Inserts |feature_value| into |low_value_vector|, if it is one of the 16
+// smallest values in the last 100 frames. Then calculates and returns the median
+// of the five smallest values.
+int16_t WebRtcVad_FindMinimum(VadInstT* self,
+                              int16_t feature_value,
+                              int channel) {
+  int i = 0, j = 0;
+  int position = -1;
+  // Offset to beginning of the 16 minimum values in memory.
+  int offset = (channel << 4);
+  int16_t current_median = 1600;
+  int16_t alpha = 0;
+  int32_t tmp32 = 0;
+  // Pointer to memory for the 16 minimum values and the age of each value of
+  // the |channel|.
+  int16_t* age_ptr = &self->index_vector[offset];
+  int16_t* value_ptr = &self->low_value_vector[offset];
+  int16_t *p1, *p2, *p3;
+
+  assert(channel < NUM_CHANNELS);
+
+  // Each value in |low_value_vector| is getting 1 loop older.
+  // Update age of each value in |age_ptr|, and remove old values.
+  for (i = 0; i < 16; i++) {
+    p3 = age_ptr + i;
+    if (*p3 != 100) {
+      *p3 += 1;
+    } else {
+      p1 = value_ptr + i + 1;
+      p2 = p3 + 1;
+      for (j = i; j < 16; j++) {
+        *(value_ptr + j) = *p1++;
+        *(age_ptr + j) = *p2++;
+      }
+      *(age_ptr + 15) = 101;
+      *(value_ptr + 15) = 10000;
+    }
+  }
+
+  // Check if |feature_value| is smaller than any of the values in
+  // |low_value_vector|. If so, find the |position| where to insert the new
+  // value.
+  if (feature_value < *(value_ptr + 7)) {
+    if (feature_value < *(value_ptr + 3)) {
+      if (feature_value < *(value_ptr + 1)) {
+        if (feature_value < *value_ptr) {
+          position = 0;
+        } else {
+          position = 1;
+        }
+      } else if (feature_value < *(value_ptr + 2)) {
+        position = 2;
+      } else {
+        position = 3;
+      }
+    } else if (feature_value < *(value_ptr + 5)) {
+      if (feature_value < *(value_ptr + 4)) {
+        position = 4;
+      } else {
+        position = 5;
+      }
+    } else if (feature_value < *(value_ptr + 6)) {
+      position = 6;
+    } else {
+      position = 7;
+    }
+  } else if (feature_value < *(value_ptr + 15)) {
+    if (feature_value < *(value_ptr + 11)) {
+      if (feature_value < *(value_ptr + 9)) {
+        if (feature_value < *(value_ptr + 8)) {
+          position = 8;
+        } else {
+          position = 9;
+        }
+      } else if (feature_value < *(value_ptr + 10)) {
+        position = 10;
+      } else {
+        position = 11;
+      }
+    } else if (feature_value < *(value_ptr + 13)) {
+      if (feature_value < *(value_ptr + 12)) {
+        position = 12;
+      } else {
+        position = 13;
+      }
+    } else if (feature_value < *(value_ptr + 14)) {
+      position = 14;
+    } else {
+      position = 15;
+    }
+  }
+
+  // If we have a new small value, put it in the correct position and shift
+  // larger values up.
+  if (position > -1) {
+    for (i = 15; i > position; i--) {
+      j = i - 1;
+      *(value_ptr + i) = *(value_ptr + j);
+      *(age_ptr + i) = *(age_ptr + j);
+    }
+    *(value_ptr + position) = feature_value;
+    *(age_ptr + position) = 1;
+  }
+
+  // Get |current_median|.
+  if (self->frame_counter > 2) {
+    current_median = *(value_ptr + 2);
+  } else if (self->frame_counter > 0) {
+    current_median = *value_ptr;
+  }
+
+  // Smooth the median value.
+  if (self->frame_counter > 0) {
+    if (current_median < self->mean_value[channel]) {
+      alpha = (int16_t) ALPHA1;  // 0.2 in Q15.
+    } else {
+      alpha = (int16_t) ALPHA2;  // 0.99 in Q15.
+    }
+  }
+  tmp32 = WEBRTC_SPL_MUL_16_16(alpha + 1, self->mean_value[channel]);
+  tmp32 += WEBRTC_SPL_MUL_16_16(WEBRTC_SPL_WORD16_MAX - alpha, current_median);
+  tmp32 += 16384;
+  self->mean_value[channel] = (int16_t) (tmp32 >> 15);
+
+  return self->mean_value[channel];
+}
diff --git a/src/common_audio/vad/vad_sp.h b/src/common_audio/vad/vad_sp.h
new file mode 100644
index 0000000..95c3b4c
--- /dev/null
+++ b/src/common_audio/vad/vad_sp.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This file includes specific signal processing tools used in vad_core.c.
+
+#ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_SP_H_
+#define WEBRTC_COMMON_AUDIO_VAD_VAD_SP_H_
+
+#include "typedefs.h"
+#include "vad_core.h"
+
+// Downsamples the signal by a factor 2, e.g. 32->16 or 16->8.
+//
+// Inputs:
+//      - signal_in     : Input signal.
+//      - in_length     : Length of input signal in samples.
+//
+// Input & Output:
+//      - filter_state  : Current filter states of the two all-pass filters. The
+//                        |filter_state| is updated after all samples have been
+//                        processed.
+//
+// Output:
+//      - signal_out    : Downsampled signal (of length |in_length| / 2).
+void WebRtcVad_Downsampling(int16_t* signal_in,
+                            int16_t* signal_out,
+                            int32_t* filter_state,
+                            int in_length);
+
+// Updates and returns the smoothed feature minimum. As minimum we use the
+// median of the five smallest feature values in a 100 frames long window.
+//
+// Inputs:
+//      - feature_value : New feature value to update with.
+//      - channel       : Channel number.
+//
+// Input & Output:
+//      - handle        : State information of the VAD.
+//
+// Returns:
+//                      : Smoothed minimum value for a moving window.
+int16_t WebRtcVad_FindMinimum(VadInstT* handle,
+                              int16_t feature_value,
+                              int channel);
+
+#endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_SP_H_
diff --git a/src/common_audio/vad/vad_unittest.cc b/src/common_audio/vad/vad_unittest.cc
new file mode 100644
index 0000000..54a397a
--- /dev/null
+++ b/src/common_audio/vad/vad_unittest.cc
@@ -0,0 +1,234 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>  // size_t
+#include <stdlib.h>
+
+#include "gtest/gtest.h"
+#include "typedefs.h"
+#include "webrtc_vad.h"
+
+// TODO(bjornv): Move the internal unit tests to separate files.
+extern "C" {
+#include "vad_core.h"
+#include "vad_gmm.h"
+#include "vad_sp.h"
+}
+
+namespace webrtc {
+namespace {
+const int16_t kModes[] = { 0, 1, 2, 3 };
+const size_t kModesSize = sizeof(kModes) / sizeof(*kModes);
+
+// Rates we support.
+const int16_t kRates[] = { 8000, 12000, 16000, 24000, 32000 };
+const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
+// Frame lengths we support.
+const int16_t kMaxFrameLength = 960;
+const int16_t kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640,
+    kMaxFrameLength };
+const size_t kFrameLengthsSize = sizeof(kFrameLengths) / sizeof(*kFrameLengths);
+
+// Returns true if the rate and frame length combination is valid.
+bool ValidRatesAndFrameLengths(int16_t rate, int16_t frame_length) {
+  if (rate == 8000) {
+    if (frame_length == 80 || frame_length == 160 || frame_length == 240) {
+      return true;
+    }
+    return false;
+  } else if (rate == 16000) {
+    if (frame_length == 160 || frame_length == 320 || frame_length == 480) {
+      return true;
+    }
+    return false;
+  }
+  if (rate == 32000) {
+    if (frame_length == 320 || frame_length == 640 || frame_length == 960) {
+      return true;
+    }
+    return false;
+  }
+
+  return false;
+}
+
+class VadTest : public ::testing::Test {
+ protected:
+  VadTest();
+  virtual void SetUp();
+  virtual void TearDown();
+};
+
+VadTest::VadTest() {
+}
+
+void VadTest::SetUp() {
+}
+
+void VadTest::TearDown() {
+}
+
+TEST_F(VadTest, ApiTest) {
+  // This API test runs through the APIs for all possible valid and invalid
+  // combinations.
+
+  VadInst* handle = NULL;
+  int16_t zeros[kMaxFrameLength] = { 0 };
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  int16_t speech[kMaxFrameLength];
+  for (int16_t i = 0; i < kMaxFrameLength; i++) {
+    speech[i] = (i * i);
+  }
+
+  // WebRtcVad_get_version() tests
+  char version[32];
+  EXPECT_EQ(-1, WebRtcVad_get_version(NULL, sizeof(version)));
+  EXPECT_EQ(-1, WebRtcVad_get_version(version, 1));
+  EXPECT_EQ(0, WebRtcVad_get_version(version, sizeof(version)));
+
+  // Null instance tests
+  EXPECT_EQ(-1, WebRtcVad_Create(NULL));
+  EXPECT_EQ(-1, WebRtcVad_Init(NULL));
+  EXPECT_EQ(-1, WebRtcVad_Assign(NULL, NULL));
+  EXPECT_EQ(-1, WebRtcVad_Free(NULL));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(NULL, kModes[0]));
+  EXPECT_EQ(-1, WebRtcVad_Process(NULL, kRates[0], speech, kFrameLengths[0]));
+
+  // WebRtcVad_AssignSize tests
+  int handle_size_bytes = 0;
+  EXPECT_EQ(0, WebRtcVad_AssignSize(&handle_size_bytes));
+  EXPECT_EQ(576, handle_size_bytes);
+
+  // WebRtcVad_Assign tests
+  void* tmp_handle = malloc(handle_size_bytes);
+  EXPECT_EQ(-1, WebRtcVad_Assign(&handle, NULL));
+  EXPECT_EQ(0, WebRtcVad_Assign(&handle, tmp_handle));
+  EXPECT_EQ(handle, tmp_handle);
+  free(tmp_handle);
+
+  // WebRtcVad_Create()
+  ASSERT_EQ(0, WebRtcVad_Create(&handle));
+
+  // Not initialized tests
+  EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[0], speech, kFrameLengths[0]));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle, kModes[0]));
+
+  // WebRtcVad_Init() test
+  ASSERT_EQ(0, WebRtcVad_Init(handle));
+
+  // WebRtcVad_set_mode() invalid modes tests
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle, kModes[0] - 1));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle, kModes[kModesSize - 1] + 1));
+
+  // WebRtcVad_Process() tests
+  // NULL speech pointer
+  EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[0], NULL, kFrameLengths[0]));
+  // Invalid sampling rate
+  EXPECT_EQ(-1, WebRtcVad_Process(handle, 9999, speech, kFrameLengths[0]));
+  // All zeros as input should work
+  EXPECT_EQ(0, WebRtcVad_Process(handle, kRates[0], zeros, kFrameLengths[0]));
+  for (size_t k = 0; k < kModesSize; k++) {
+    // Test valid modes
+    EXPECT_EQ(0, WebRtcVad_set_mode(handle, kModes[k]));
+    // Loop through sampling rate and frame length combinations
+    for (size_t i = 0; i < kRatesSize; i++) {
+      for (size_t j = 0; j < kFrameLengthsSize; j++) {
+        if (ValidRatesAndFrameLengths(kRates[i], kFrameLengths[j])) {
+          EXPECT_EQ(1, WebRtcVad_Process(handle,
+                                         kRates[i],
+                                         speech,
+                                         kFrameLengths[j]));
+        } else {
+          EXPECT_EQ(-1, WebRtcVad_Process(handle,
+                                          kRates[i],
+                                          speech,
+                                          kFrameLengths[j]));
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, WebRtcVad_Free(handle));
+}
+
+TEST_F(VadTest, GMMTests) {
+  int16_t delta = 0;
+  // Input value at mean.
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(0, 0, 128, &delta));
+  EXPECT_EQ(0, delta);
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(16, 128, 128, &delta));
+  EXPECT_EQ(0, delta);
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(-16, -128, 128, &delta));
+  EXPECT_EQ(0, delta);
+
+  // Largest possible input to give non-zero probability.
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(59, 0, 128, &delta));
+  EXPECT_EQ(7552, delta);
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(75, 128, 128, &delta));
+  EXPECT_EQ(7552, delta);
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(-75, -128, 128, &delta));
+  EXPECT_EQ(-7552, delta);
+
+  // Too large input, should give zero probability.
+  EXPECT_EQ(0, WebRtcVad_GaussianProbability(105, 0, 128, &delta));
+  EXPECT_EQ(13440, delta);
+}
+
+TEST_F(VadTest, SPTests) {
+  VadInstT* handle = (VadInstT*) malloc(sizeof(VadInstT));
+  int16_t zeros[kMaxFrameLength] = { 0 };
+  int32_t state[2] = { 0 };
+  int16_t data_in[kMaxFrameLength];
+  int16_t data_out[kMaxFrameLength];
+
+  const int16_t kReferenceMin[32] = {
+      1600, 720, 509, 512, 532, 552, 570, 588,
+      606, 624, 642, 659, 675, 691, 707, 723,
+      1600, 544, 502, 522, 542, 561, 579, 597,
+      615, 633, 651, 667, 683, 699, 715, 731
+  };
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  for (int16_t i = 0; i < kMaxFrameLength; ++i) {
+    data_in[i] = (i * i);
+  }
+  // Input values all zeros, expect all zeros out.
+  WebRtcVad_Downsampling(zeros, data_out, state, (int) kMaxFrameLength);
+  EXPECT_EQ(0, state[0]);
+  EXPECT_EQ(0, state[1]);
+  for (int16_t i = 0; i < kMaxFrameLength / 2; ++i) {
+    EXPECT_EQ(0, data_out[i]);
+  }
+  // Make a simple non-zero data test.
+  WebRtcVad_Downsampling(data_in, data_out, state, (int) kMaxFrameLength);
+  EXPECT_EQ(207, state[0]);
+  EXPECT_EQ(2270, state[1]);
+
+  ASSERT_EQ(0, WebRtcVad_InitCore(handle, 0));
+  for (int16_t i = 0; i < 16; ++i) {
+    int16_t value = 500 * (i + 1);
+    for (int j = 0; j < NUM_CHANNELS; ++j) {
+      // Use values both above and below initialized value.
+      EXPECT_EQ(kReferenceMin[i], WebRtcVad_FindMinimum(handle, value, j));
+      EXPECT_EQ(kReferenceMin[i + 16], WebRtcVad_FindMinimum(handle, 12000, j));
+    }
+    handle->frame_counter++;
+  }
+
+  free(handle);
+}
+
+// TODO(bjornv): Add a process test, run on file.
+
+}  // namespace
+}  // namespace webrtc
diff --git a/src/common_audio/vad/main/source/webrtc_vad.c b/src/common_audio/vad/webrtc_vad.c
similarity index 100%
rename from src/common_audio/vad/main/source/webrtc_vad.c
rename to src/common_audio/vad/webrtc_vad.c
diff --git a/src/common_types.h b/src/common_types.h
index 8b0b8a5..02d712e 100644
--- a/src/common_types.h
+++ b/src/common_types.h
@@ -251,6 +251,8 @@
     WebRtc_UWord16 currentBufferSize;
     // preferred (optimal) buffer size in ms
     WebRtc_UWord16 preferredBufferSize;
+    // adding extra delay due to "peaky jitter"
+    bool jitterPeaksFound;
     // loss rate (network + late) in percent (in Q14)
     WebRtc_UWord16 currentPacketLossRate;
     // late loss rate in percent (in Q14)
@@ -263,58 +265,14 @@
     WebRtc_UWord16 currentPreemptiveRate;
     // fraction of data removed through acceleration (in Q14)
     WebRtc_UWord16 currentAccelerateRate;
-};
-
-struct JitterStatistics
-{
-    // smallest Jitter Buffer size during call in ms
-    WebRtc_UWord32 jbMinSize;
-    // largest Jitter Buffer size during call in ms
-    WebRtc_UWord32 jbMaxSize;
-    // the average JB size, measured over time - ms
-    WebRtc_UWord32 jbAvgSize;
-    // number of times the Jitter Buffer changed (using Accelerate or
-    // Pre-emptive Expand)
-    WebRtc_UWord32 jbChangeCount;
-    // amount (in ms) of audio data received late
-    WebRtc_UWord32 lateLossMs;
-    // milliseconds removed to reduce jitter buffer size
-    WebRtc_UWord32 accelerateMs;
-    // milliseconds discarded through buffer flushing
-    WebRtc_UWord32 flushedMs;
-    // milliseconds of generated silence
-    WebRtc_UWord32 generatedSilentMs;
-    // milliseconds of synthetic audio data (non-background noise)
-    WebRtc_UWord32 interpolatedVoiceMs;
-    // milliseconds of synthetic audio data (background noise level)
-    WebRtc_UWord32 interpolatedSilentMs;
-    // count of tiny expansions in output audio
-    WebRtc_UWord32 countExpandMoreThan120ms;
-    // count of small expansions in output audio
-    WebRtc_UWord32 countExpandMoreThan250ms;
-    // count of medium expansions in output audio
-    WebRtc_UWord32 countExpandMoreThan500ms;
-    // count of long expansions in output audio
-    WebRtc_UWord32 countExpandMoreThan2000ms;
-    // duration of longest audio drop-out
-    WebRtc_UWord32 longestExpandDurationMs;
-    // count of times we got small network outage (inter-arrival time in
-    // [500, 1000) ms)
-    WebRtc_UWord32 countIAT500ms;
-    // count of times we got medium network outage (inter-arrival time in
-    // [1000, 2000) ms)
-    WebRtc_UWord32 countIAT1000ms;
-    // count of times we got large network outage (inter-arrival time >=
-    // 2000 ms)
-    WebRtc_UWord32 countIAT2000ms;
-    // longest packet inter-arrival time in ms
-    WebRtc_UWord32 longestIATms;
-    // min time incoming Packet "waited" to be played
-    WebRtc_UWord32 minPacketDelayMs;
-    // max time incoming Packet "waited" to be played
-    WebRtc_UWord32 maxPacketDelayMs;
-    // avg time incoming Packet "waited" to be played
-    WebRtc_UWord32 avgPacketDelayMs;
+    // clock-drift in parts-per-million (negative or positive)
+    int32_t clockDriftPPM;
+    // average packet waiting time in the jitter buffer (ms)
+    int meanWaitingTimeMs;
+    // median packet waiting time in the jitter buffer (ms)
+    int medianWaitingTimeMs;
+    // max packet waiting time in the jitter buffer (ms)
+    int maxWaitingTimeMs;
 };
 
 typedef struct
@@ -479,12 +437,15 @@
     kVideoMJPEG    = 10,
     kVideoNV12     = 11,
     kVideoNV21     = 12,
+    kVideoBGRA     = 13,
     kVideoUnknown  = 99
 };
 
 // Video codec
 enum { kConfigParameterSize = 128};
 enum { kPayloadNameSize = 32};
+enum { kMaxSimulcastStreams = 4};
+enum { kMaxTemporalStreams = 4};
 
 // H.263 specific
 struct VideoCodecH263
@@ -513,6 +474,17 @@
     kProfileMain = 0x01
 };
 
+enum VP8ResilienceMode {
+  kResilienceOff,    // The stream produced by the encoder requires a
+                     // recovery frame (typically a key frame) to be
+                     // decodable after a packet loss.
+  kResilientStream,  // A stream produced by the encoder is resilient to
+                     // packet losses, but packets within a frame subsequent
+                     // to a loss can't be decoded.
+  kResilientFrames   // Same as kResilientStream but with added resilience
+                     // within a frame.
+};
+
 struct VideoCodecH264
 {
     H264Packetization          packetization;
@@ -530,9 +502,11 @@
 // VP8 specific
 struct VideoCodecVP8
 {
-    bool                       pictureLossIndicationOn;
-    bool                       feedbackModeOn;
-    VideoCodecComplexity       complexity;
+    bool                 pictureLossIndicationOn;
+    bool                 feedbackModeOn;
+    VideoCodecComplexity complexity;
+    VP8ResilienceMode    resilience;
+    unsigned char        numberOfTemporalLayers;
 };
 
 // MPEG-4 specific
@@ -570,6 +544,19 @@
     VideoCodecGeneric   Generic;
 };
 
+/*
+*  Simulcast is when the same stream is encoded multiple times with different
+*  settings such as resolution.
+*/
+struct SimulcastStream
+{
+    unsigned short      width;
+    unsigned short      height;
+    unsigned char       numberOfTemporalLayers;
+    unsigned int        maxBitrate;
+    unsigned int        qpMax; // minimum quality
+};
+
 // Common video codec properties
 struct VideoCodec
 {
@@ -588,8 +575,8 @@
     VideoCodecUnion     codecSpecific;
 
     unsigned int        qpMax;
+    unsigned char       numberOfSimulcastStreams;
+    SimulcastStream     simulcastStream[kMaxSimulcastStreams];
 };
-
 }  // namespace webrtc
-
 #endif  // WEBRTC_COMMON_TYPES_H
diff --git a/src/modules/audio_processing/Android.mk b/src/modules/audio_processing/Android.mk
new file mode 100644
index 0000000..9ca2aee
--- /dev/null
+++ b/src/modules/audio_processing/Android.mk
@@ -0,0 +1,143 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc_apm
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES := \
+    $(call all-proto-files-under, .) \
+    audio_buffer.cc \
+    audio_processing_impl.cc \
+    echo_cancellation_impl.cc \
+    echo_control_mobile_impl.cc \
+    gain_control_impl.cc \
+    high_pass_filter_impl.cc \
+    level_estimator_impl.cc \
+    noise_suppression_impl.cc \
+    splitting_filter.cc \
+    processing_component.cc \
+    voice_detection_impl.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS) \
+    '-DWEBRTC_NS_FIXED'
+#   floating point
+#   -DWEBRTC_NS_FLOAT'
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/aec/interface \
+    $(LOCAL_PATH)/aecm/interface \
+    $(LOCAL_PATH)/agc/interface \
+    $(LOCAL_PATH)/ns/interface \
+    $(LOCAL_PATH)/../interface \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../../common_audio/signal_processing/include \
+    $(LOCAL_PATH)/../../common_audio/vad/include \
+    $(LOCAL_PATH)/../../system_wrappers/interface \
+    external/protobuf/src
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
+
+# apm process test app
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+    $(call all-proto-files-under, .) \
+    test/process_test.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../interface \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../../system_wrappers/interface \
+    external/gtest/include
+
+LOCAL_STATIC_LIBRARIES := \
+    libgtest \
+    libprotobuf-cpp-2.3.0-lite
+
+LOCAL_SHARED_LIBRARIES := \
+    libutils \
+    libstlport \
+    libwebrtc_audio_preprocessing
+
+LOCAL_MODULE:= webrtc_apm_process_test
+
+ifdef NDK_ROOT
+include $(BUILD_EXECUTABLE)
+else
+include external/stlport/libstlport.mk
+include $(BUILD_NATIVE_TEST)
+endif
+
+# apm unit test app
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+    $(call all-proto-files-under, test) \
+    test/unit_test.cc \
+    ../../../test/testsupport/fileutils.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS) \
+    '-DWEBRTC_APM_UNIT_TEST_FIXED_PROFILE'
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../interface \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../../../test \
+    $(LOCAL_PATH)/../../system_wrappers/interface \
+    $(LOCAL_PATH)/../../common_audio/signal_processing/include \
+    external/gtest/include \
+    external/protobuf/src
+
+LOCAL_STATIC_LIBRARIES := \
+    libgtest \
+    libprotobuf-cpp-2.3.0-lite
+
+LOCAL_SHARED_LIBRARIES := \
+    libstlport \
+    libwebrtc_audio_preprocessing
+
+LOCAL_MODULE:= webrtc_apm_unit_test
+
+ifdef NDK_ROOT
+include $(BUILD_EXECUTABLE)
+else
+include external/stlport/libstlport.mk
+include $(BUILD_NATIVE_TEST)
+endif
diff --git a/src/modules/audio_processing/OWNERS b/src/modules/audio_processing/OWNERS
index aecf56e..5a25634 100644
--- a/src/modules/audio_processing/OWNERS
+++ b/src/modules/audio_processing/OWNERS
@@ -1,2 +1,2 @@
-ajm@google.com
-bjornv@google.com
+andrew@webrtc.org
+bjornv@webrtc.org
diff --git a/src/modules/audio_processing/aec/Android.mk b/src/modules/audio_processing/aec/Android.mk
new file mode 100644
index 0000000..698755a
--- /dev/null
+++ b/src/modules/audio_processing/aec/Android.mk
@@ -0,0 +1,45 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_aec
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := \
+    echo_cancellation.c \
+    aec_resampler.c \
+    aec_core.c \
+    aec_rdft.c \
+    aec_core_sse2.c \
+    aec_rdft_sse2.c
+
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../utility \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/aec/main/source/aec.gyp b/src/modules/audio_processing/aec/aec.gypi
similarity index 63%
rename from src/modules/audio_processing/aec/main/source/aec.gyp
rename to src/modules/audio_processing/aec/aec.gypi
index 0427e00..7e86a90 100644
--- a/src/modules/audio_processing/aec/main/source/aec.gyp
+++ b/src/modules/audio_processing/aec/aec.gypi
@@ -7,43 +7,43 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../../common_settings.gypi',
-  ],
   'targets': [
     {
       'target_name': 'aec',
       'type': '<(library)',
+      'variables': {
+        # Outputs some low-level debug files.
+        'aec_debug_dump%': 0,
+      },
       'dependencies': [
-        '../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
-        '../../../utility/util.gyp:apm_util'
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        'apm_util'
       ],
       'include_dirs': [
-        '../interface',
+        'interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'interface',
         ],
       },
       'sources': [
-        '../interface/echo_cancellation.h',
+        'interface/echo_cancellation.h',
         'echo_cancellation.c',
+        'aec_core.h',
         'aec_core.c',
         'aec_core_sse2.c',
         'aec_rdft.h',
         'aec_rdft.c',
         'aec_rdft_sse2.c',
-        'aec_core.h',
-        'resampler.c',
-        'resampler.h',
+        'aec_resampler.h',
+        'aec_resampler.c',
+      ],
+      'conditions': [
+        ['aec_debug_dump==1', {
+          'defines': [ 'WEBRTC_AEC_DEBUG_DUMP', ],
+        }],
       ],
     },
   ],
 }
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/aec/main/source/aec_core.c b/src/modules/audio_processing/aec/aec_core.c
similarity index 67%
rename from src/modules/audio_processing/aec/main/source/aec_core.c
rename to src/modules/audio_processing/aec/aec_core.c
index 81197ea..6718dec 100644
--- a/src/modules/audio_processing/aec/main/source/aec_core.c
+++ b/src/modules/audio_processing/aec/aec_core.c
@@ -12,14 +12,22 @@
  * The core AEC algorithm, which is presented with time-aligned signals.
  */
 
+#include "aec_core.h"
+
+#include <assert.h>
 #include <math.h>
+#include <stddef.h>  // size_t
 #include <stdlib.h>
 #include <string.h>
 
-#include "aec_core.h"
 #include "aec_rdft.h"
+#include "delay_estimator_wrapper.h"
 #include "ring_buffer.h"
 #include "system_wrappers/interface/cpu_features_wrapper.h"
+#include "typedefs.h"
+
+// Buffer size (samples)
+static const size_t kBufSizePartitions = 250;  // 1 second of audio in 16 kHz.
 
 // Noise suppression
 static const int converged = 250;
@@ -34,26 +42,9 @@
 // Initial bin for averaging nlp gain in low band
 static const int freqAvgIc = PART_LEN / 2;
 
-/* Matlab code to produce table:
-win = sqrt(hanning(63)); win = [0 ; win(1:32)];
-fprintf(1, '\t%.14f, %.14f, %.14f,\n', win);
-*/
-/*
-static const float sqrtHanning[33] = {
-    0.00000000000000, 0.04906767432742, 0.09801714032956,
-    0.14673047445536, 0.19509032201613, 0.24298017990326,
-    0.29028467725446, 0.33688985339222, 0.38268343236509,
-    0.42755509343028, 0.47139673682600, 0.51410274419322,
-    0.55557023301960, 0.59569930449243, 0.63439328416365,
-    0.67155895484702, 0.70710678118655, 0.74095112535496,
-    0.77301045336274, 0.80320753148064, 0.83146961230255,
-    0.85772861000027, 0.88192126434835, 0.90398929312344,
-    0.92387953251129, 0.94154406518302, 0.95694033573221,
-    0.97003125319454, 0.98078528040323, 0.98917650996478,
-    0.99518472667220, 0.99879545620517, 1.00000000000000
-};
-*/
-
+// Matlab code to produce table:
+// win = sqrt(hanning(63)); win = [0 ; win(1:32)];
+// fprintf(1, '\t%.14f, %.14f, %.14f,\n', win);
 static const float sqrtHanning[65] = {
     0.00000000000000f, 0.02454122852291f, 0.04906767432742f,
     0.07356456359967f, 0.09801714032956f, 0.12241067519922f,
@@ -79,10 +70,9 @@
     0.99969881869620f, 1.00000000000000f
 };
 
-/* Matlab code to produce table:
-weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1];
-fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve);
-*/
+// Matlab code to produce table:
+// weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1];
+// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve);
 const float WebRtcAec_weightCurve[65] = {
     0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f,
     0.1845f, 0.1926f, 0.2000f, 0.2069f, 0.2134f, 0.2195f,
@@ -97,10 +87,9 @@
     0.3903f, 0.3928f, 0.3952f, 0.3976f, 0.4000f
 };
 
-/* Matlab code to produce table:
-overDriveCurve = [sqrt(linspace(0,1,65))' + 1];
-fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve);
-*/
+// Matlab code to produce table:
+// overDriveCurve = [sqrt(linspace(0,1,65))' + 1];
+// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve);
 const float WebRtcAec_overDriveCurve[65] = {
     1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f,
     1.3062f, 1.3307f, 1.3536f, 1.3750f, 1.3953f, 1.4146f,
@@ -116,12 +105,7 @@
 };
 
 // "Private" function prototypes.
-static void ProcessBlock(aec_t *aec, const short *farend,
-                              const short *nearend, const short *nearendH,
-                              short *out, short *outH);
-
-static void BufferFar(aec_t *aec, const short *farend, int farLen);
-static void FetchFar(aec_t *aec, short *farend, int farLen, int knownDelay);
+static void ProcessBlock(aec_t* aec);
 
 static void NonLinearProcessing(aec_t *aec, short *output, short *outputH);
 
@@ -134,8 +118,13 @@
 
 static void WebRtcAec_InitLevel(power_level_t *level);
 static void WebRtcAec_InitStats(stats_t *stats);
-static void UpdateLevel(power_level_t *level, const short *in);
+static void UpdateLevel(power_level_t* level, float in[2][PART_LEN1]);
 static void UpdateMetrics(aec_t *aec);
+// Convert from time domain to frequency domain. Note that |time_data| are
+// overwritten.
+static void TimeToFrequency(float time_data[PART_LEN2],
+                            float freq_data[2][PART_LEN1],
+                            int window);
 
 __inline static float MulRe(float aRe, float aIm, float bRe, float bIm)
 {
@@ -163,35 +152,67 @@
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aec->farFrBuf, FRAME_LEN + PART_LEN) == -1) {
+    if (WebRtc_CreateBuffer(&aec->nearFrBuf,
+                            FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1) {
         WebRtcAec_FreeAec(aec);
         aec = NULL;
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aec->nearFrBuf, FRAME_LEN + PART_LEN) == -1) {
+    if (WebRtc_CreateBuffer(&aec->outFrBuf,
+                            FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1) {
         WebRtcAec_FreeAec(aec);
         aec = NULL;
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aec->outFrBuf, FRAME_LEN + PART_LEN) == -1) {
+    if (WebRtc_CreateBuffer(&aec->nearFrBufH,
+                            FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1) {
         WebRtcAec_FreeAec(aec);
         aec = NULL;
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aec->nearFrBufH, FRAME_LEN + PART_LEN) == -1) {
+    if (WebRtc_CreateBuffer(&aec->outFrBufH,
+                            FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1) {
         WebRtcAec_FreeAec(aec);
         aec = NULL;
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aec->outFrBufH, FRAME_LEN + PART_LEN) == -1) {
+    // Create far-end buffers.
+    if (WebRtc_CreateBuffer(&aec->far_buf, kBufSizePartitions,
+                            sizeof(float) * 2 * PART_LEN1) == -1) {
         WebRtcAec_FreeAec(aec);
         aec = NULL;
         return -1;
     }
+    if (WebRtc_CreateBuffer(&aec->far_buf_windowed, kBufSizePartitions,
+                            sizeof(float) * 2 * PART_LEN1) == -1) {
+        WebRtcAec_FreeAec(aec);
+        aec = NULL;
+        return -1;
+    }
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    if (WebRtc_CreateBuffer(&aec->far_time_buf, kBufSizePartitions,
+                            sizeof(int16_t) * PART_LEN) == -1) {
+        WebRtcAec_FreeAec(aec);
+        aec = NULL;
+        return -1;
+    }
+#endif
+    if (WebRtc_CreateDelayEstimator(&aec->delay_estimator,
+                                    PART_LEN1,
+                                    kMaxDelayBlocks,
+                                    kLookaheadBlocks) == -1) {
+      WebRtcAec_FreeAec(aec);
+      aec = NULL;
+      return -1;
+    }
 
     return 0;
 }
@@ -202,12 +223,18 @@
         return -1;
     }
 
-    WebRtcApm_FreeBuffer(aec->farFrBuf);
-    WebRtcApm_FreeBuffer(aec->nearFrBuf);
-    WebRtcApm_FreeBuffer(aec->outFrBuf);
+    WebRtc_FreeBuffer(aec->nearFrBuf);
+    WebRtc_FreeBuffer(aec->outFrBuf);
 
-    WebRtcApm_FreeBuffer(aec->nearFrBufH);
-    WebRtcApm_FreeBuffer(aec->outFrBufH);
+    WebRtc_FreeBuffer(aec->nearFrBufH);
+    WebRtc_FreeBuffer(aec->outFrBufH);
+
+    WebRtc_FreeBuffer(aec->far_buf);
+    WebRtc_FreeBuffer(aec->far_buf_windowed);
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    WebRtc_FreeBuffer(aec->far_time_buf);
+#endif
+    WebRtc_FreeDelayEstimator(aec->delay_estimator);
 
     free(aec);
     return 0;
@@ -255,6 +282,32 @@
   }
 }
 
+// Time-unconstrained filter adaptation.
+// TODO(andrew): consider for a low-complexity mode.
+//static void FilterAdaptationUnconstrained(aec_t *aec, float *fft,
+//                                          float ef[2][PART_LEN1]) {
+//  int i, j;
+//  for (i = 0; i < NR_PART; i++) {
+//    int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
+//    int pos;
+//    // Check for wrap
+//    if (i + aec->xfBufBlockPos >= NR_PART) {
+//      xPos -= NR_PART * PART_LEN1;
+//    }
+//
+//    pos = i * PART_LEN1;
+//
+//    for (j = 0; j < PART_LEN1; j++) {
+//      aec->wfBuf[pos + j][0] += MulRe(aec->xfBuf[xPos + j][0],
+//                                      -aec->xfBuf[xPos + j][1],
+//                                      ef[j][0], ef[j][1]);
+//      aec->wfBuf[pos + j][1] += MulIm(aec->xfBuf[xPos + j][0],
+//                                      -aec->xfBuf[xPos + j][1],
+//                                      ef[j][0], ef[j][1]);
+//    }
+//  }
+//}
+
 static void FilterAdaptation(aec_t *aec, float *fft, float ef[2][PART_LEN1]) {
   int i, j;
   for (i = 0; i < NR_PART; i++) {
@@ -267,16 +320,6 @@
 
     pos = i * PART_LEN1;
 
-#ifdef UNCONSTR
-    for (j = 0; j < PART_LEN1; j++) {
-      aec->wfBuf[pos + j][0] += MulRe(aec->xfBuf[xPos + j][0],
-                                      -aec->xfBuf[xPos + j][1],
-                                      ef[j][0], ef[j][1]);
-      aec->wfBuf[pos + j][1] += MulIm(aec->xfBuf[xPos + j][0],
-                                      -aec->xfBuf[xPos + j][1],
-                                      ef[j][0], ef[j][1]);
-    }
-#else
     for (j = 0; j < PART_LEN; j++) {
 
       fft[2 * j] = MulRe(aec->xfBuf[0][xPos + j],
@@ -309,7 +352,6 @@
       aec->wfBuf[0][pos + j] += fft[2 * j];
       aec->wfBuf[1][pos + j] += fft[2 * j + 1];
     }
-#endif // UNCONSTR
   }
 }
 
@@ -355,25 +397,41 @@
         aec->errThresh = 1.5e-6f;
     }
 
-    if (WebRtcApm_InitBuffer(aec->farFrBuf) == -1) {
+    if (WebRtc_InitBuffer(aec->nearFrBuf) == -1) {
         return -1;
     }
 
-    if (WebRtcApm_InitBuffer(aec->nearFrBuf) == -1) {
+    if (WebRtc_InitBuffer(aec->outFrBuf) == -1) {
         return -1;
     }
 
-    if (WebRtcApm_InitBuffer(aec->outFrBuf) == -1) {
+    if (WebRtc_InitBuffer(aec->nearFrBufH) == -1) {
         return -1;
     }
 
-    if (WebRtcApm_InitBuffer(aec->nearFrBufH) == -1) {
+    if (WebRtc_InitBuffer(aec->outFrBufH) == -1) {
         return -1;
     }
 
-    if (WebRtcApm_InitBuffer(aec->outFrBufH) == -1) {
+    // Initialize far-end buffers.
+    if (WebRtc_InitBuffer(aec->far_buf) == -1) {
         return -1;
     }
+    if (WebRtc_InitBuffer(aec->far_buf_windowed) == -1) {
+        return -1;
+    }
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    if (WebRtc_InitBuffer(aec->far_time_buf) == -1) {
+        return -1;
+    }
+#endif
+    aec->system_delay = 0;
+
+    if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
+      return -1;
+    }
+    aec->delay_logging_enabled = 0;
+    memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
 
     // Default target suppression level
     aec->targetSupp = -11.5;
@@ -396,8 +454,6 @@
     aec->knownDelay = 0;
 
     // Initialize buffers
-    memset(aec->farBuf, 0, sizeof(aec->farBuf));
-    memset(aec->xBuf, 0, sizeof(aec->xBuf));
     memset(aec->dBuf, 0, sizeof(aec->dBuf));
     memset(aec->eBuf, 0, sizeof(aec->eBuf));
     // For H band
@@ -451,13 +507,6 @@
     aec->seed = 777;
     aec->delayEstCtr = 0;
 
-    // Features on by default (G.167)
-#ifdef G167
-    aec->adaptToggle = 1;
-    aec->nlpToggle = 1;
-    aec->cnToggle = 1;
-#endif
-
     // Metrics disabled by default
     aec->metricsMode = 0;
     WebRtcAec_InitMetrics(aec);
@@ -468,7 +517,7 @@
     WebRtcAec_FilterAdaptation = FilterAdaptation;
     WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress;
     if (WebRtc_GetCPUInfo(kSSE2)) {
-#if defined(__SSE2__)
+#if defined(WEBRTC_USE_SSE2)
       WebRtcAec_InitAec_SSE2();
 #endif
     }
@@ -492,86 +541,108 @@
 }
 
 
-void WebRtcAec_ProcessFrame(aec_t *aec, const short *farend,
-                       const short *nearend, const short *nearendH,
-                       short *out, short *outH,
-                       int knownDelay)
-{
-    short farBl[PART_LEN], nearBl[PART_LEN], outBl[PART_LEN];
-    short farFr[FRAME_LEN];
-    // For H band
-    short nearBlH[PART_LEN], outBlH[PART_LEN];
+void WebRtcAec_BufferFarendPartition(aec_t *aec, const float* farend) {
+  float fft[PART_LEN2];
+  float xf[2][PART_LEN1];
 
-    int size = 0;
+  // Check if the buffer is full, and in that case flush the oldest data.
+  if (WebRtc_available_write(aec->far_buf) < 1) {
+    WebRtc_MoveReadPtr(aec->far_buf, 1);
+    WebRtc_MoveReadPtr(aec->far_buf_windowed, 1);
+    aec->system_delay -= PART_LEN;
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    WebRtc_MoveReadPtr(aec->far_time_buf, 1);
+#endif
+  }
+  // Convert far-end partition to the frequency domain without windowing.
+  memcpy(fft, farend, sizeof(float) * PART_LEN2);
+  TimeToFrequency(fft, xf, 0);
+  WebRtc_WriteBuffer(aec->far_buf, &xf[0][0], 1);
 
-    // initialize: only used for SWB
-    memset(nearBlH, 0, sizeof(nearBlH));
-    memset(outBlH, 0, sizeof(outBlH));
-
-    // Buffer the current frame.
-    // Fetch an older one corresponding to the delay.
-    BufferFar(aec, farend, FRAME_LEN);
-    FetchFar(aec, farFr, FRAME_LEN, knownDelay);
-
-    // Buffer the synchronized far and near frames,
-    // to pass the smaller blocks individually.
-    WebRtcApm_WriteBuffer(aec->farFrBuf, farFr, FRAME_LEN);
-    WebRtcApm_WriteBuffer(aec->nearFrBuf, nearend, FRAME_LEN);
-    // For H band
-    if (aec->sampFreq == 32000) {
-        WebRtcApm_WriteBuffer(aec->nearFrBufH, nearendH, FRAME_LEN);
-    }
-
-    // Process as many blocks as possible.
-    while (WebRtcApm_get_buffer_size(aec->farFrBuf) >= PART_LEN) {
-
-        WebRtcApm_ReadBuffer(aec->farFrBuf, farBl, PART_LEN);
-        WebRtcApm_ReadBuffer(aec->nearFrBuf, nearBl, PART_LEN);
-
-        // For H band
-        if (aec->sampFreq == 32000) {
-            WebRtcApm_ReadBuffer(aec->nearFrBufH, nearBlH, PART_LEN);
-        }
-
-        ProcessBlock(aec, farBl, nearBl, nearBlH, outBl, outBlH);
-
-        WebRtcApm_WriteBuffer(aec->outFrBuf, outBl, PART_LEN);
-        // For H band
-        if (aec->sampFreq == 32000) {
-            WebRtcApm_WriteBuffer(aec->outFrBufH, outBlH, PART_LEN);
-        }
-    }
-
-    // Stuff the out buffer if we have less than a frame to output.
-    // This should only happen for the first frame.
-    size = WebRtcApm_get_buffer_size(aec->outFrBuf);
-    if (size < FRAME_LEN) {
-        WebRtcApm_StuffBuffer(aec->outFrBuf, FRAME_LEN - size);
-        if (aec->sampFreq == 32000) {
-            WebRtcApm_StuffBuffer(aec->outFrBufH, FRAME_LEN - size);
-        }
-    }
-
-    // Obtain an output frame.
-    WebRtcApm_ReadBuffer(aec->outFrBuf, out, FRAME_LEN);
-    // For H band
-    if (aec->sampFreq == 32000) {
-        WebRtcApm_ReadBuffer(aec->outFrBufH, outH, FRAME_LEN);
-    }
+  // Convert far-end partition to the frequency domain with windowing.
+  memcpy(fft, farend, sizeof(float) * PART_LEN2);
+  TimeToFrequency(fft, xf, 1);
+  WebRtc_WriteBuffer(aec->far_buf_windowed, &xf[0][0], 1);
 }
 
-static void ProcessBlock(aec_t *aec, const short *farend,
-                              const short *nearend, const short *nearendH,
-                              short *output, short *outputH)
+void WebRtcAec_ProcessFrame(aec_t *aec,
+                            const short *nearend,
+                            const short *nearendH,
+                            int knownDelay)
 {
+    // For each frame the process is as follows:
+    // 1) If the system_delay indicates on being too small for processing a
+    //    frame we stuff the buffer with enough data for 10 ms.
+    // 2) Adjust the buffer to the system delay, by moving the read pointer.
+    // 3) If we can't move read pointer due to buffer size limitations we
+    //    flush/stuff the buffer.
+    // 4) Process as many partitions as possible.
+    // 5) Update the |system_delay| with respect to a full frame of FRAME_LEN
+    //    samples. Even though we will have data left to process (we work with
+    //    partitions) we consider updating a whole frame, since that's the
+    //    amount of data we input and output in audio_processing.
+
+    // TODO(bjornv): Investigate how we should round the delay difference; right
+    // now we know that incoming |knownDelay| is underestimated when it's less
+    // than |aec->knownDelay|. We therefore, round (-32) in that direction. In
+    // the other direction, we don't have this situation, but might flush one
+    // partition too little. This can cause non-causality, which should be
+    // investigated. Maybe, allow for a non-symmetric rounding, like -16.
+    int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
+    int moved_elements = 0;
+
+    // TODO(bjornv): Change the near-end buffer handling to be the same as for
+    // far-end, that is, with a near_pre_buf.
+    // Buffer the near-end frame.
+    WebRtc_WriteBuffer(aec->nearFrBuf, nearend, FRAME_LEN);
+    // For H band
+    if (aec->sampFreq == 32000) {
+        WebRtc_WriteBuffer(aec->nearFrBufH, nearendH, FRAME_LEN);
+    }
+
+    // 1) At most we process |aec->mult|+1 partitions in 10 ms. Make sure we
+    // have enough far-end data for that by stuffing the buffer if the
+    // |system_delay| indicates others.
+    if (aec->system_delay < FRAME_LEN) {
+      // We don't have enough data so we rewind 10 ms.
+      WebRtc_MoveReadPtr(aec->far_buf_windowed, -(aec->mult + 1));
+      aec->system_delay -= WebRtc_MoveReadPtr(aec->far_buf, -(aec->mult + 1)) *
+          PART_LEN;
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+      WebRtc_MoveReadPtr(aec->far_time_buf, -(aec->mult + 1));
+#endif
+    }
+
+    // 2) Compensate for a possible change in the system delay.
+
+    WebRtc_MoveReadPtr(aec->far_buf_windowed, move_elements);
+    moved_elements = WebRtc_MoveReadPtr(aec->far_buf, move_elements);
+    aec->knownDelay -= moved_elements * PART_LEN;
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
+#endif
+
+    // 4) Process as many blocks as possible.
+    while (WebRtc_available_read(aec->nearFrBuf) >= PART_LEN) {
+        ProcessBlock(aec);
+    }
+
+    // 5) Update system delay with respect to the entire frame.
+    aec->system_delay -= FRAME_LEN;
+}
+
+static void ProcessBlock(aec_t* aec) {
     int i;
     float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
-    short eInt16[PART_LEN];
     float scale;
 
     float fft[PART_LEN2];
     float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
-    complex_t df[PART_LEN1];
+    float df[2][PART_LEN1];
+    float far_spectrum = 0.0f;
+    float near_spectrum = 0.0f;
+    float abs_far_spectrum[PART_LEN1];
+    float abs_near_spectrum[PART_LEN1];
 
     const float gPow[2] = {0.9f, 0.1f};
 
@@ -581,66 +652,64 @@
     const float ramp = 1.0002f;
     const float gInitNoise[2] = {0.999f, 0.001f};
 
-#ifdef AEC_DEBUG
-    fwrite(farend, sizeof(short), PART_LEN, aec->farFile);
-    fwrite(nearend, sizeof(short), PART_LEN, aec->nearFile);
-#endif
+    int16_t nearend[PART_LEN];
+    int16_t* nearend_ptr = NULL;
+    int16_t output[PART_LEN];
+    int16_t outputH[PART_LEN];
+
+    float* xf_ptr = NULL;
 
     memset(dH, 0, sizeof(dH));
+    if (aec->sampFreq == 32000) {
+      // Get the upper band first so we can reuse |nearend|.
+      WebRtc_ReadBuffer(aec->nearFrBufH,
+                        (void**) &nearend_ptr,
+                        nearend,
+                        PART_LEN);
+      for (i = 0; i < PART_LEN; i++) {
+          dH[i] = (float) (nearend_ptr[i]);
+      }
+      memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
+    }
+    WebRtc_ReadBuffer(aec->nearFrBuf, (void**) &nearend_ptr, nearend, PART_LEN);
 
     // ---------- Ooura fft ----------
-    // Concatenate old and new farend blocks.
+    // Concatenate old and new nearend blocks.
     for (i = 0; i < PART_LEN; i++) {
-        aec->xBuf[i + PART_LEN] = (float)farend[i];
-        d[i] = (float)nearend[i];
+        d[i] = (float) (nearend_ptr[i]);
     }
-
-    if (aec->sampFreq == 32000) {
-        for (i = 0; i < PART_LEN; i++) {
-            dH[i] = (float)nearendH[i];
-        }
-    }
-
-
-    memcpy(fft, aec->xBuf, sizeof(float) * PART_LEN2);
     memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);
-    // For H band
-    if (aec->sampFreq == 32000) {
-        memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    {
+        int16_t farend[PART_LEN];
+        int16_t* farend_ptr = NULL;
+        WebRtc_ReadBuffer(aec->far_time_buf, (void**) &farend_ptr, farend, 1);
+        fwrite(farend_ptr, sizeof(int16_t), PART_LEN, aec->farFile);
+        fwrite(nearend_ptr, sizeof(int16_t), PART_LEN, aec->nearFile);
     }
+#endif
 
-    aec_rdft_forward_128(fft);
-
-    // Far fft
-    xf[1][0] = 0;
-    xf[1][PART_LEN] = 0;
-    xf[0][0] = fft[0];
-    xf[0][PART_LEN] = fft[1];
-
-    for (i = 1; i < PART_LEN; i++) {
-        xf[0][i] = fft[2 * i];
-        xf[1][i] = fft[2 * i + 1];
-    }
+    // We should always have at least one element stored in |far_buf|.
+    assert(WebRtc_available_read(aec->far_buf) > 0);
+    WebRtc_ReadBuffer(aec->far_buf, (void**) &xf_ptr, &xf[0][0], 1);
 
     // Near fft
     memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
-    aec_rdft_forward_128(fft);
-    df[0][1] = 0;
-    df[PART_LEN][1] = 0;
-    df[0][0] = fft[0];
-    df[PART_LEN][0] = fft[1];
-
-    for (i = 1; i < PART_LEN; i++) {
-        df[i][0] = fft[2 * i];
-        df[i][1] = fft[2 * i + 1];
-    }
+    TimeToFrequency(fft, df, 0);
 
     // Power smoothing
     for (i = 0; i < PART_LEN1; i++) {
-        aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART *
-            (xf[0][i] * xf[0][i] + xf[1][i] * xf[1][i]);
-        aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] *
-            (df[i][0] * df[i][0] + df[i][1] * df[i][1]);
+      far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
+          (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
+      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
+      // Calculate absolute spectra
+      abs_far_spectrum[i] = sqrtf(far_spectrum);
+
+      near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
+      aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
+      // Calculate absolute spectra
+      abs_near_spectrum[i] = sqrtf(near_spectrum);
     }
 
     // Estimate noise power. Wait until dPow is more stable.
@@ -675,6 +744,19 @@
         aec->noisePow = aec->dMinPow;
     }
 
+    // Block wise delay estimation used for logging
+    if (aec->delay_logging_enabled) {
+      int delay_estimate = 0;
+      // Estimate the delay
+      delay_estimate = WebRtc_DelayEstimatorProcessFloat(aec->delay_estimator,
+                                                         abs_far_spectrum,
+                                                         abs_near_spectrum,
+                                                         PART_LEN1);
+      if (delay_estimate >= 0) {
+        // Update delay estimate buffer.
+        aec->delay_histogram[delay_estimate]++;
+      }
+    }
 
     // Update the xfBuf block position.
     aec->xfBufBlockPos--;
@@ -683,9 +765,9 @@
     }
 
     // Buffer xf
-    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf[0],
+    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf_ptr,
            sizeof(float) * PART_LEN1);
-    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, xf[1],
+    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, &xf_ptr[PART_LEN1],
            sizeof(float) * PART_LEN1);
 
     memset(yf[0], 0, sizeof(float) * (PART_LEN1 * 2));
@@ -715,6 +797,7 @@
     memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
     memset(fft, 0, sizeof(float) * PART_LEN);
     memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
+    // TODO(bjornv): Change to use TimeToFrequency().
     aec_rdft_forward_128(fft);
 
     ef[1][0] = 0;
@@ -726,55 +809,49 @@
         ef[1][i] = fft[2 * i + 1];
     }
 
+    if (aec->metricsMode == 1) {
+      // Note that the first PART_LEN samples in fft (before transformation) are
+      // zero. Hence, the scaling by two in UpdateLevel() should not be
+      // performed. That scaling is taken care of in UpdateMetrics() instead.
+      UpdateLevel(&aec->linoutlevel, ef);
+    }
+
     // Scale error signal inversely with far power.
     WebRtcAec_ScaleErrorSignal(aec, ef);
-#ifdef G167
-    if (aec->adaptToggle) {
-#endif
-        // Filter adaptation
-        WebRtcAec_FilterAdaptation(aec, fft, ef);
-#ifdef G167
-    }
-#endif
-
+    WebRtcAec_FilterAdaptation(aec, fft, ef);
     NonLinearProcessing(aec, output, outputH);
 
-#if defined(AEC_DEBUG) || defined(G167)
-    for (i = 0; i < PART_LEN; i++) {
-        eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
-            WEBRTC_SPL_WORD16_MIN);
-    }
-#endif
-#ifdef G167
-    if (aec->nlpToggle == 0) {
-        memcpy(output, eInt16, sizeof(eInt16));
-    }
-#endif
-
     if (aec->metricsMode == 1) {
-        for (i = 0; i < PART_LEN; i++) {
-            eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
-                WEBRTC_SPL_WORD16_MIN);
-        }
-
         // Update power levels and echo metrics
-        UpdateLevel(&aec->farlevel, farend);
-        UpdateLevel(&aec->nearlevel, nearend);
-        UpdateLevel(&aec->linoutlevel, eInt16);
-        UpdateLevel(&aec->nlpoutlevel, output);
+        UpdateLevel(&aec->farlevel, (float (*)[PART_LEN1]) xf_ptr);
+        UpdateLevel(&aec->nearlevel, df);
         UpdateMetrics(aec);
     }
 
-#ifdef AEC_DEBUG
-    fwrite(eInt16, sizeof(short), PART_LEN, aec->outLpFile);
-    fwrite(output, sizeof(short), PART_LEN, aec->outFile);
+    // Store the output block.
+    WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
+    // For H band
+    if (aec->sampFreq == 32000) {
+        WebRtc_WriteBuffer(aec->outFrBufH, outputH, PART_LEN);
+    }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    {
+        int16_t eInt16[PART_LEN];
+        for (i = 0; i < PART_LEN; i++) {
+            eInt16[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
+                WEBRTC_SPL_WORD16_MIN);
+        }
+
+        fwrite(eInt16, sizeof(int16_t), PART_LEN, aec->outLinearFile);
+        fwrite(output, sizeof(int16_t), PART_LEN, aec->outFile);
+    }
 #endif
 }
 
 static void NonLinearProcessing(aec_t *aec, short *output, short *outputH)
 {
-    float efw[2][PART_LEN1], dfw[2][PART_LEN1];
-    complex_t xfw[PART_LEN1];
+    float efw[2][PART_LEN1], dfw[2][PART_LEN1], xfw[2][PART_LEN1];
     complex_t comfortNoiseHband[PART_LEN1];
     float fft[PART_LEN2];
     float scale, dtmp;
@@ -798,10 +875,12 @@
     const float gCoh[2][2] = {{0.9f, 0.1f}, {0.93f, 0.07f}};
     const float *ptrGCoh = gCoh[aec->mult - 1];
 
-    // Filter energey
+    // Filter energy
     float wfEnMax = 0, wfEn = 0;
     const int delayEstInterval = 10 * aec->mult;
 
+    float* xfw_ptr = NULL;
+
     aec->delayEstCtr++;
     if (aec->delayEstCtr == delayEstInterval) {
         aec->delayEstCtr = 0;
@@ -832,25 +911,15 @@
         }
     }
 
+    // We should always have at least one element stored in |far_buf|.
+    assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
     // NLP
-    // Windowed far fft
-    for (i = 0; i < PART_LEN; i++) {
-        fft[i] = aec->xBuf[i] * sqrtHanning[i];
-        fft[PART_LEN + i] = aec->xBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
-    }
-    aec_rdft_forward_128(fft);
+    WebRtc_ReadBuffer(aec->far_buf_windowed, (void**) &xfw_ptr, &xfw[0][0], 1);
 
-    xfw[0][1] = 0;
-    xfw[PART_LEN][1] = 0;
-    xfw[0][0] = fft[0];
-    xfw[PART_LEN][0] = fft[1];
-    for (i = 1; i < PART_LEN; i++) {
-        xfw[i][0] = fft[2 * i];
-        xfw[i][1] = fft[2 * i + 1];
-    }
-
+    // TODO(bjornv): Investigate if we can reuse |far_buf_windowed| instead of
+    // |xfwBuf|.
     // Buffer far.
-    memcpy(aec->xfwBuf, xfw, sizeof(xfw));
+    memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);
 
     // Use delayed far.
     memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, sizeof(xfw));
@@ -897,7 +966,7 @@
         // adverse interaction with the algorithm's tuning.
         // TODO: investigate further why this is so sensitive.
         aec->sx[i] = ptrGCoh[0] * aec->sx[i] + ptrGCoh[1] *
-            WEBRTC_SPL_MAX(xfw[i][0] * xfw[i][0] + xfw[i][1] * xfw[i][1], 15);
+            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], 15);
 
         aec->sde[i][0] = ptrGCoh[0] * aec->sde[i][0] + ptrGCoh[1] *
             (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
@@ -905,9 +974,9 @@
             (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
 
         aec->sxd[i][0] = ptrGCoh[0] * aec->sxd[i][0] + ptrGCoh[1] *
-            (dfw[0][i] * xfw[i][0] + dfw[1][i] * xfw[i][1]);
+            (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
         aec->sxd[i][1] = ptrGCoh[0] * aec->sxd[i][1] + ptrGCoh[1] *
-            (dfw[0][i] * xfw[i][1] - dfw[1][i] * xfw[i][0]);
+            (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
 
         sdSum += aec->sd[i];
         seSum += aec->se[i];
@@ -1036,15 +1105,18 @@
 
     WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);
 
-#ifdef G167
-    if (aec->cnToggle) {
-      ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
-    }
-#else
     // Add comfort noise.
     ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
-#endif
 
+    // TODO(bjornv): Investigate how to take the windowing below into account if
+    // needed.
+    if (aec->metricsMode == 1) {
+      // Note that we have a scaling by two in the time domain |eBuf|.
+      // In addition the time domain signal is windowed before transformation,
+      // losing half the energy on the average. We take care of the first
+      // scaling only in UpdateMetrics().
+      UpdateLevel(&aec->nlpoutlevel, efw);
+    }
     // Inverse error fft.
     fft[0] = efw[0][0];
     fft[1] = efw[0][PART_LEN];
@@ -1107,7 +1179,6 @@
     }
 
     // Copy the current block to the old position.
-    memcpy(aec->xBuf, aec->xBuf + PART_LEN, sizeof(float) * PART_LEN);
     memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN);
     memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);
 
@@ -1216,60 +1287,6 @@
     }
 }
 
-// Buffer the farend to account for knownDelay
-static void BufferFar(aec_t *aec, const short *farend, int farLen)
-{
-    int writeLen = farLen, writePos = 0;
-
-    // Check if the write position must be wrapped.
-    while (aec->farBufWritePos + writeLen > FAR_BUF_LEN) {
-
-        // Write to remaining buffer space before wrapping.
-        writeLen = FAR_BUF_LEN - aec->farBufWritePos;
-        memcpy(aec->farBuf + aec->farBufWritePos, farend + writePos,
-            sizeof(short) * writeLen);
-        aec->farBufWritePos = 0;
-        writePos = writeLen;
-        writeLen = farLen - writeLen;
-    }
-
-    memcpy(aec->farBuf + aec->farBufWritePos, farend + writePos,
-        sizeof(short) * writeLen);
-    aec->farBufWritePos +=  writeLen;
-}
-
-static void FetchFar(aec_t *aec, short *farend, int farLen, int knownDelay)
-{
-    int readLen = farLen, readPos = 0, delayChange = knownDelay - aec->knownDelay;
-
-    aec->farBufReadPos -= delayChange;
-
-    // Check if delay forces a read position wrap.
-    while(aec->farBufReadPos < 0) {
-        aec->farBufReadPos += FAR_BUF_LEN;
-    }
-    while(aec->farBufReadPos > FAR_BUF_LEN - 1) {
-        aec->farBufReadPos -= FAR_BUF_LEN;
-    }
-
-    aec->knownDelay = knownDelay;
-
-    // Check if read position must be wrapped.
-    while (aec->farBufReadPos + readLen > FAR_BUF_LEN) {
-
-        // Read from remaining buffer space before wrapping.
-        readLen = FAR_BUF_LEN - aec->farBufReadPos;
-        memcpy(farend + readPos, aec->farBuf + aec->farBufReadPos,
-            sizeof(short) * readLen);
-        aec->farBufReadPos = 0;
-        readPos = readLen;
-        readLen = farLen - readLen;
-    }
-    memcpy(farend + readPos, aec->farBuf + aec->farBufReadPos,
-        sizeof(short) * readLen);
-    aec->farBufReadPos += readLen;
-}
-
 static void WebRtcAec_InitLevel(power_level_t *level)
 {
     const float bigFloat = 1E17f;
@@ -1296,42 +1313,68 @@
     stats->hicounter = 0;
 }
 
-static void UpdateLevel(power_level_t *level, const short *in)
-{
-    int k;
+static void UpdateLevel(power_level_t* level, float in[2][PART_LEN1]) {
+  // Do the energy calculation in the frequency domain. The FFT is performed on
+  // a segment of PART_LEN2 samples due to overlap, but we only want the energy
+  // of half that data (the last PART_LEN samples). Parseval's relation states
+  // that the energy is preserved according to
+  //
+  // \sum_{n=0}^{N-1} |x(n)|^2 = 1/N * \sum_{n=0}^{N-1} |X(n)|^2
+  //                           = ENERGY,
+  //
+  // where N = PART_LEN2. Since we are only interested in calculating the energy
+  // for the last PART_LEN samples we approximate by calculating ENERGY and
+  // divide by 2,
+  //
+  // \sum_{n=N/2}^{N-1} |x(n)|^2 ~= ENERGY / 2
+  //
+  // Since we deal with real valued time domain signals we only store frequency
+  // bins [0, PART_LEN], which is what |in| consists of. To calculate ENERGY we
+  // need to add the contribution from the missing part in
+  // [PART_LEN+1, PART_LEN2-1]. These values are, up to a phase shift, identical
+  // with the values in [1, PART_LEN-1], hence multiply those values by 2. This
+  // is the values in the for loop below, but multiplication by 2 and division
+  // by 2 cancel.
 
-    for (k = 0; k < PART_LEN; k++) {
-        level->sfrsum += in[k] * in[k];
+  // TODO(bjornv): Investigate reusing energy calculations performed at other
+  // places in the code.
+  int k = 1;
+  // Imaginary parts are zero at end points and left out of the calculation.
+  float energy = (in[0][0] * in[0][0]) / 2;
+  energy += (in[0][PART_LEN] * in[0][PART_LEN]) / 2;
+
+  for (k = 1; k < PART_LEN; k++) {
+    energy += (in[0][k] * in[0][k] + in[1][k] * in[1][k]);
+  }
+  energy /= PART_LEN2;
+
+  level->sfrsum += energy;
+  level->sfrcounter++;
+
+  if (level->sfrcounter > subCountLen) {
+    level->framelevel = level->sfrsum / (subCountLen * PART_LEN);
+    level->sfrsum = 0;
+    level->sfrcounter = 0;
+    if (level->framelevel > 0) {
+      if (level->framelevel < level->minlevel) {
+        level->minlevel = level->framelevel;  // New minimum.
+      } else {
+        level->minlevel *= (1 + 0.001f);  // Small increase.
+      }
     }
-    level->sfrcounter++;
-
-    if (level->sfrcounter > subCountLen) {
-        level->framelevel = level->sfrsum / (subCountLen * PART_LEN);
-        level->sfrsum = 0;
-        level->sfrcounter = 0;
-
-        if (level->framelevel > 0) {
-            if (level->framelevel < level->minlevel) {
-                level->minlevel = level->framelevel;     // New minimum
-            } else {
-                level->minlevel *= (1 + 0.001f);   // Small increase
-            }
-        }
-        level->frcounter++;
-        level->frsum += level->framelevel;
-
-        if (level->frcounter > countLen) {
-            level->averagelevel =  level->frsum / countLen;
-            level->frsum = 0;
-            level->frcounter = 0;
-        }
-
+    level->frcounter++;
+    level->frsum += level->framelevel;
+    if (level->frcounter > countLen) {
+      level->averagelevel = level->frsum / countLen;
+      level->frsum = 0;
+      level->frcounter = 0;
     }
+  }
 }
 
 static void UpdateMetrics(aec_t *aec)
 {
-    float dtmp, dtmp2, dtmp3;
+    float dtmp, dtmp2;
 
     const float actThresholdNoisy = 8.0f;
     const float actThresholdClean = 40.0f;
@@ -1345,7 +1388,7 @@
         aec->stateCounter++;
     }
 
-    if (aec->farlevel.frcounter == countLen) {
+    if (aec->farlevel.frcounter == 0) {
 
         if (aec->farlevel.minlevel < noisyPower) {
             actThreshold = actThresholdClean;
@@ -1391,13 +1434,13 @@
 
             // A_NLP
             dtmp = 10 * (float)log10(aec->nearlevel.averagelevel /
-                aec->linoutlevel.averagelevel + 1e-10f);
+                (2 * aec->linoutlevel.averagelevel) + 1e-10f);
 
             // subtract noise power
-            suppressedEcho = aec->linoutlevel.averagelevel - safety * aec->linoutlevel.minlevel;
+            suppressedEcho = 2 * (aec->linoutlevel.averagelevel -
+                safety * aec->linoutlevel.minlevel);
 
             dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f);
-            dtmp3 = 10 * (float)log10(aec->nearlevel.averagelevel / suppressedEcho + 1e-10f);
 
             aec->aNlp.instant = dtmp2;
             if (dtmp > aec->aNlp.max) {
@@ -1422,10 +1465,11 @@
             // ERLE
 
             // subtract noise power
-            suppressedEcho = aec->nlpoutlevel.averagelevel - safety * aec->nlpoutlevel.minlevel;
+            suppressedEcho = 2 * (aec->nlpoutlevel.averagelevel -
+                safety * aec->nlpoutlevel.minlevel);
 
             dtmp = 10 * (float)log10(aec->nearlevel.averagelevel /
-                aec->nlpoutlevel.averagelevel + 1e-10f);
+                (2 * aec->nlpoutlevel.averagelevel) + 1e-10f);
             dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f);
 
             dtmp = dtmp2;
@@ -1454,3 +1498,27 @@
     }
 }
 
+static void TimeToFrequency(float time_data[PART_LEN2],
+                            float freq_data[2][PART_LEN1],
+                            int window) {
+  int i = 0;
+
+  // TODO(bjornv): Should we have a different function/wrapper for windowed FFT?
+  if (window) {
+    for (i = 0; i < PART_LEN; i++) {
+      time_data[i] *= sqrtHanning[i];
+      time_data[PART_LEN + i] *= sqrtHanning[PART_LEN - i];
+    }
+  }
+
+  aec_rdft_forward_128(time_data);
+  // Reorder.
+  freq_data[1][0] = 0;
+  freq_data[1][PART_LEN] = 0;
+  freq_data[0][0] = time_data[0];
+  freq_data[0][PART_LEN] = time_data[1];
+  for (i = 1; i < PART_LEN; i++) {
+    freq_data[0][i] = time_data[2 * i];
+    freq_data[1][i] = time_data[2 * i + 1];
+  }
+}
diff --git a/src/modules/audio_processing/aec/main/source/aec_core.h b/src/modules/audio_processing/aec/aec_core.h
similarity index 76%
rename from src/modules/audio_processing/aec/main/source/aec_core.h
rename to src/modules/audio_processing/aec/aec_core.h
index 3386b92..1b9828a 100644
--- a/src/modules/audio_processing/aec/main/source/aec_core.h
+++ b/src/modules/audio_processing/aec/aec_core.h
@@ -16,24 +16,21 @@
 #define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_CORE_H_
 
 #include <stdio.h>
-#include "typedefs.h"
-#include "signal_processing_library.h"
 
-//#define G167 // for running G167 tests
-//#define UNCONSTR // time-unconstrained filter
-//#define AEC_DEBUG // for recording files
+#include "signal_processing_library.h"
+#include "typedefs.h"
 
 #define FRAME_LEN 80
 #define PART_LEN 64 // Length of partition
 #define PART_LEN1 (PART_LEN + 1) // Unique fft coefficients
 #define PART_LEN2 (PART_LEN * 2) // Length of partition * 2
-#define NR_PART 12 // Number of partitions
-#define FILT_LEN (PART_LEN * NR_PART) // Filter length
-#define FILT_LEN2 (FILT_LEN * 2) // Double filter length
-#define FAR_BUF_LEN (FILT_LEN2 * 2)
+#define NR_PART 12  // Number of partitions in filter.
 #define PREF_BAND_SIZE 24
 
-#define BLOCKL_MAX FRAME_LEN
+// Delay estimator constants, used for logging.
+enum { kMaxDelayBlocks = 60 };
+enum { kLookaheadBlocks = 15 };
+enum { kHistorySizeBlocks = kMaxDelayBlocks + kLookaheadBlocks };
 
 typedef float complex_t[2];
 // For performance reasons, some arrays of complex numbers are replaced by twice
@@ -76,12 +73,11 @@
     int inSamples, outSamples;
     int delayEstCtr;
 
-    void *farFrBuf, *nearFrBuf, *outFrBuf;
+    void *nearFrBuf, *outFrBuf;
 
     void *nearFrBufH;
     void *outFrBufH;
 
-    float xBuf[PART_LEN2]; // farend
     float dBuf[PART_LEN2]; // nearend
     float eBuf[PART_LEN2]; // error
 
@@ -92,21 +88,13 @@
     float dMinPow[PART_LEN1];
     float dInitMinPow[PART_LEN1];
     float *noisePow;
-#ifdef FFTW
-    float fftR[PART_LEN2];
-    fftw_complex fftC[PART_LEN2];
-    fftw_plan fftPlan, ifftPlan;
 
-    fftw_complex xfBuf[NR_PART * PART_LEN1];
-    fftw_complex wfBuf[NR_PART * PART_LEN1];
-    fftw_complex sde[PART_LEN1];
-#else
     float xfBuf[2][NR_PART * PART_LEN1]; // farend fft buffer
     float wfBuf[2][NR_PART * PART_LEN1]; // filter fft
     complex_t sde[PART_LEN1]; // cross-psd of nearend and error
     complex_t sxd[PART_LEN1]; // cross-psd of farend and nearend
     complex_t xfwBuf[NR_PART * PART_LEN1]; // farend windowed fft buffer
-#endif
+
     float sx[PART_LEN1], sd[PART_LEN1], se[PART_LEN1]; // far, near and error psd
     float hNs[PART_LEN1];
     float hNlFbMin, hNlFbLocalMin;
@@ -122,9 +110,11 @@
 
     int xfBufBlockPos;
 
-    short farBuf[FILT_LEN2 * 2];
+    void* far_buf;
+    void* far_buf_windowed;
+    int system_delay;  // Current system delay buffered in AEC.
 
-    short mult; // sampling frequency multiple
+    int mult;  // sampling frequency multiple
     int sampFreq;
     WebRtc_UWord32 seed;
 
@@ -133,13 +123,6 @@
 
     int noiseEstCtr;
 
-    // Toggles for G.167 testing
-#ifdef G167
-    short adaptToggle;  // Filter adaptation
-    short nlpToggle;    // Nonlinear processing
-    short cnToggle;     // Comfort noise
-#endif
-
     power_level_t farlevel;
     power_level_t nearlevel;
     power_level_t linoutlevel;
@@ -157,11 +140,16 @@
     int flag_Hband_cn;      //for comfort noise
     float cn_scale_Hband;   //scale for comfort noise in H band
 
-#ifdef AEC_DEBUG
+    int delay_histogram[kHistorySizeBlocks];
+    int delay_logging_enabled;
+    void* delay_estimator;
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    void* far_time_buf;
     FILE *farFile;
     FILE *nearFile;
     FILE *outFile;
-    FILE *outLpFile;
+    FILE *outLinearFile;
 #endif
 } aec_t;
 
@@ -169,8 +157,6 @@
 extern WebRtcAec_FilterFar_t WebRtcAec_FilterFar;
 typedef void (*WebRtcAec_ScaleErrorSignal_t)(aec_t *aec, float ef[2][PART_LEN1]);
 extern WebRtcAec_ScaleErrorSignal_t WebRtcAec_ScaleErrorSignal;
-#define IP_LEN PART_LEN // this must be at least ceil(2 + sqrt(PART_LEN))
-#define W_LEN PART_LEN
 typedef void (*WebRtcAec_FilterAdaptation_t)
   (aec_t *aec, float *fft, float ef[2][PART_LEN1]);
 extern WebRtcAec_FilterAdaptation_t WebRtcAec_FilterAdaptation;
@@ -184,10 +170,10 @@
 void WebRtcAec_InitAec_SSE2(void);
 
 void WebRtcAec_InitMetrics(aec_t *aec);
-void WebRtcAec_ProcessFrame(aec_t *aec, const short *farend,
-                       const short *nearend, const short *nearendH,
-                       short *out, short *outH,
-                       int knownDelay);
+void WebRtcAec_BufferFarendPartition(aec_t *aec, const float* farend);
+void WebRtcAec_ProcessFrame(aec_t* aec,
+                            const short *nearend,
+                            const short *nearendH,
+                            int knownDelay);
 
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_CORE_H_
-
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_CORE_H_
diff --git a/src/modules/audio_processing/aec/main/source/aec_core_sse2.c b/src/modules/audio_processing/aec/aec_core_sse2.c
similarity index 94%
rename from src/modules/audio_processing/aec/main/source/aec_core_sse2.c
rename to src/modules/audio_processing/aec/aec_core_sse2.c
index 524669f..8894f28 100644
--- a/src/modules/audio_processing/aec/main/source/aec_core_sse2.c
+++ b/src/modules/audio_processing/aec/aec_core_sse2.c
@@ -12,7 +12,9 @@
  * The core AEC algorithm, SSE2 version of speed-critical functions.
  */
 
-#if defined(__SSE2__)
+#include "typedefs.h"
+
+#if defined(WEBRTC_USE_SSE2)
 #include <emmintrin.h>
 #include <math.h>
 
@@ -136,16 +138,6 @@
       xPos -= NR_PART * PART_LEN1;
     }
 
-#ifdef UNCONSTR
-    for (j = 0; j < PART_LEN1; j++) {
-      aec->wfBuf[pos + j][0] += MulRe(aec->xfBuf[xPos + j][0],
-                                      -aec->xfBuf[xPos + j][1],
-                                      ef[j][0], ef[j][1]);
-      aec->wfBuf[pos + j][1] += MulIm(aec->xfBuf[xPos + j][0],
-                                      -aec->xfBuf[xPos + j][1],
-                                      ef[j][0], ef[j][1]);
-    }
-#else
     // Process the whole array...
     for (j = 0; j < PART_LEN; j+= 4) {
       // Load xfBuf and ef.
@@ -206,18 +198,9 @@
       }
       aec->wfBuf[1][pos] = wt1;
     }
-#endif // UNCONSTR
   }
 }
 
-#ifdef _MSC_VER /* visual c++ */
-# define ALIGN16_BEG __declspec(align(16))
-# define ALIGN16_END
-#else /* gcc or icc */
-# define ALIGN16_BEG
-# define ALIGN16_END __attribute__((aligned(16)))
-#endif
-
 static __m128 mm_pow_ps(__m128 a, __m128 b)
 {
   // a^b = exp2(b * log2(a))
@@ -252,10 +235,9 @@
         {0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
     static const int shift_exponent_into_top_mantissa = 8;
     const __m128 two_n = _mm_and_ps(a, *((__m128 *)float_exponent_mask));
-    const __m128 n_1 = (__m128)_mm_srli_epi32((__m128i)two_n,
-        shift_exponent_into_top_mantissa);
-    const __m128 n_0 = _mm_or_ps(
-        (__m128)n_1, *((__m128 *)eight_biased_exponent));
+    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(_mm_castps_si128(two_n),
+        shift_exponent_into_top_mantissa));
+    const __m128 n_0 = _mm_or_ps(n_1, *((__m128 *)eight_biased_exponent));
     const __m128 n   = _mm_sub_ps(n_0,  *((__m128 *)implicit_leading_one));
 
     // Compute y.
@@ -334,8 +316,8 @@
     static const int float_exponent_shift = 23;
     const __m128i two_n_exponent = _mm_add_epi32(
         x_minus_half_floor, *((__m128i *)float_exponent_bias));
-    const __m128  two_n = (__m128)_mm_slli_epi32(
-        two_n_exponent, float_exponent_shift);
+    const __m128  two_n = _mm_castsi128_ps(_mm_slli_epi32(
+        two_n_exponent, float_exponent_shift));
     // Compute y.
     const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
     // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
@@ -432,4 +414,4 @@
   WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
 }
 
-#endif   //__SSE2__
+#endif   // WEBRTC_USE_SSE2
diff --git a/src/modules/audio_processing/aec/main/source/aec_rdft.c b/src/modules/audio_processing/aec/aec_rdft.c
similarity index 65%
rename from src/modules/audio_processing/aec/main/source/aec_rdft.c
rename to src/modules/audio_processing/aec/aec_rdft.c
index 072a1c4..9222334 100644
--- a/src/modules/audio_processing/aec/main/source/aec_rdft.c
+++ b/src/modules/audio_processing/aec/aec_rdft.c
@@ -19,12 +19,27 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "aec_rdft.h"
+
 #include <math.h>
 
-#include "aec_rdft.h"
 #include "system_wrappers/interface/cpu_features_wrapper.h"
+#include "typedefs.h"
 
+// constants shared by all paths (C, SSE2).
 float rdft_w[64];
+// constants used by the C path.
+float rdft_wk3ri_first[32];
+float rdft_wk3ri_second[32];
+// constants used by SSE2 but initialized in C path.
+ALIGN16_BEG float ALIGN16_END rdft_wk1r[32];
+ALIGN16_BEG float ALIGN16_END rdft_wk2r[32];
+ALIGN16_BEG float ALIGN16_END rdft_wk3r[32];
+ALIGN16_BEG float ALIGN16_END rdft_wk1i[32];
+ALIGN16_BEG float ALIGN16_END rdft_wk2i[32];
+ALIGN16_BEG float ALIGN16_END rdft_wk3i[32];
+ALIGN16_BEG float ALIGN16_END cftmdl_wk1r[4];
+
 static int ip[16];
 
 static void bitrv2_32or128(int n, int *ip, float *a) {
@@ -101,7 +116,7 @@
   }
 }
 
-static void makewt_32() {
+static void makewt_32(void) {
   const int nw = 32;
   int j, nwh;
   float delta, x, y;
@@ -123,9 +138,59 @@
     rdft_w[nw - j + 1] = x;
   }
   bitrv2_32or128(nw, ip + 2, rdft_w);
+
+  // pre-calculate constants used by cft1st_128 and cftmdl_128...
+  cftmdl_wk1r[0] = rdft_w[2];
+  cftmdl_wk1r[1] = rdft_w[2];
+  cftmdl_wk1r[2] = rdft_w[2];
+  cftmdl_wk1r[3] = -rdft_w[2];
+  {
+    int k1;
+
+    for (k1 = 0, j = 0; j < 128; j += 16, k1 += 2) {
+      const int k2 = 2 * k1;
+      const float wk2r = rdft_w[k1 + 0];
+      const float wk2i = rdft_w[k1 + 1];
+      float wk1r, wk1i;
+      // ... scalar version.
+      wk1r = rdft_w[k2 + 0];
+      wk1i = rdft_w[k2 + 1];
+      rdft_wk3ri_first[k1 + 0] = wk1r - 2 * wk2i * wk1i;
+      rdft_wk3ri_first[k1 + 1] = 2 * wk2i * wk1r - wk1i;
+      wk1r = rdft_w[k2 + 2];
+      wk1i = rdft_w[k2 + 3];
+      rdft_wk3ri_second[k1 + 0] = wk1r - 2 * wk2r * wk1i;
+      rdft_wk3ri_second[k1 + 1] = 2 * wk2r * wk1r - wk1i;
+      // ... vector version.
+      rdft_wk1r[k2 + 0] = rdft_w[k2 + 0];
+      rdft_wk1r[k2 + 1] = rdft_w[k2 + 0];
+      rdft_wk1r[k2 + 2] = rdft_w[k2 + 2];
+      rdft_wk1r[k2 + 3] = rdft_w[k2 + 2];
+      rdft_wk2r[k2 + 0] = rdft_w[k1 + 0];
+      rdft_wk2r[k2 + 1] = rdft_w[k1 + 0];
+      rdft_wk2r[k2 + 2] = -rdft_w[k1 + 1];
+      rdft_wk2r[k2 + 3] = -rdft_w[k1 + 1];
+      rdft_wk3r[k2 + 0] = rdft_wk3ri_first[k1 + 0];
+      rdft_wk3r[k2 + 1] = rdft_wk3ri_first[k1 + 0];
+      rdft_wk3r[k2 + 2] = rdft_wk3ri_second[k1 + 0];
+      rdft_wk3r[k2 + 3] = rdft_wk3ri_second[k1 + 0];
+      rdft_wk1i[k2 + 0] = -rdft_w[k2 + 1];
+      rdft_wk1i[k2 + 1] = rdft_w[k2 + 1];
+      rdft_wk1i[k2 + 2] = -rdft_w[k2 + 3];
+      rdft_wk1i[k2 + 3] = rdft_w[k2 + 3];
+      rdft_wk2i[k2 + 0] = -rdft_w[k1 + 1];
+      rdft_wk2i[k2 + 1] = rdft_w[k1 + 1];
+      rdft_wk2i[k2 + 2] = -rdft_w[k1 + 0];
+      rdft_wk2i[k2 + 3] = rdft_w[k1 + 0];
+      rdft_wk3i[k2 + 0] = -rdft_wk3ri_first[k1 + 1];
+      rdft_wk3i[k2 + 1] = rdft_wk3ri_first[k1 + 1];
+      rdft_wk3i[k2 + 2] = -rdft_wk3ri_second[k1 + 1];
+      rdft_wk3i[k2 + 3] = rdft_wk3ri_second[k1 + 1];
+    }
+  }
 }
 
-static void makect_32() {
+static void makect_32(void) {
   float *c = rdft_w + 32;
   const int nc = 32;
   int j, nch;
@@ -142,7 +207,7 @@
   }
 }
 
-static void cft1st_128(float *a) {
+static void cft1st_128_C(float *a) {
   const int n = 128;
   int j, k1, k2;
   float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
@@ -189,21 +254,21 @@
   for (j = 16; j < n; j += 16) {
     k1 += 2;
     k2 = 2 * k1;
-    wk2r = rdft_w[k1];
+    wk2r = rdft_w[k1 + 0];
     wk2i = rdft_w[k1 + 1];
-    wk1r = rdft_w[k2];
+    wk1r = rdft_w[k2 + 0];
     wk1i = rdft_w[k2 + 1];
-    wk3r = wk1r - 2 * wk2i * wk1i;
-    wk3i = 2 * wk2i * wk1r - wk1i;
-    x0r = a[j] + a[j + 2];
+    wk3r = rdft_wk3ri_first[k1 + 0];
+    wk3i = rdft_wk3ri_first[k1 + 1];
+    x0r = a[j + 0] + a[j + 2];
     x0i = a[j + 1] + a[j + 3];
-    x1r = a[j] - a[j + 2];
+    x1r = a[j + 0] - a[j + 2];
     x1i = a[j + 1] - a[j + 3];
     x2r = a[j + 4] + a[j + 6];
     x2i = a[j + 5] + a[j + 7];
     x3r = a[j + 4] - a[j + 6];
     x3i = a[j + 5] - a[j + 7];
-    a[j] = x0r + x2r;
+    a[j + 0] = x0r + x2r;
     a[j + 1] = x0i + x2i;
     x0r -= x2r;
     x0i -= x2i;
@@ -219,8 +284,8 @@
     a[j + 7] = wk3r * x0i + wk3i * x0r;
     wk1r = rdft_w[k2 + 2];
     wk1i = rdft_w[k2 + 3];
-    wk3r = wk1r - 2 * wk2r * wk1i;
-    wk3i = 2 * wk2r * wk1r - wk1i;
+    wk3r = rdft_wk3ri_second[k1 + 0];
+    wk3i = rdft_wk3ri_second[k1 + 1];
     x0r = a[j + 8] + a[j + 10];
     x0i = a[j + 9] + a[j + 11];
     x1r = a[j + 8] - a[j + 10];
@@ -246,58 +311,59 @@
   }
 }
 
-static void cftmdl_128(int l, float *a) {
+static void cftmdl_128_C(float *a) {
+  const int l = 8;
   const int n = 128;
-  int j, j1, j2, j3, k, k1, k2, m, m2;
+  const int m = 32;
+  int j0, j1, j2, j3, k, k1, k2, m2;
   float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
   float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
-  m = l << 2;
-  for (j = 0; j < l; j += 2) {
-    j1 = j + l;
-    j2 = j1 + l;
-    j3 = j2 + l;
-    x0r = a[j] + a[j1];
-    x0i = a[j + 1] + a[j1 + 1];
-    x1r = a[j] - a[j1];
-    x1i = a[j + 1] - a[j1 + 1];
-    x2r = a[j2] + a[j3];
+  for (j0 = 0; j0 < l; j0 += 2) {
+    j1 = j0 +  8;
+    j2 = j0 + 16;
+    j3 = j0 + 24;
+    x0r = a[j0 + 0] + a[j1 + 0];
+    x0i = a[j0 + 1] + a[j1 + 1];
+    x1r = a[j0 + 0] - a[j1 + 0];
+    x1i = a[j0 + 1] - a[j1 + 1];
+    x2r = a[j2 + 0] + a[j3 + 0];
     x2i = a[j2 + 1] + a[j3 + 1];
-    x3r = a[j2] - a[j3];
+    x3r = a[j2 + 0] - a[j3 + 0];
     x3i = a[j2 + 1] - a[j3 + 1];
-    a[j] = x0r + x2r;
-    a[j + 1] = x0i + x2i;
-    a[j2] = x0r - x2r;
+    a[j0 + 0] = x0r + x2r;
+    a[j0 + 1] = x0i + x2i;
+    a[j2 + 0] = x0r - x2r;
     a[j2 + 1] = x0i - x2i;
-    a[j1] = x1r - x3i;
+    a[j1 + 0] = x1r - x3i;
     a[j1 + 1] = x1i + x3r;
-    a[j3] = x1r + x3i;
+    a[j3 + 0] = x1r + x3i;
     a[j3 + 1] = x1i - x3r;
   }
   wk1r = rdft_w[2];
-  for (j = m; j < l + m; j += 2) {
-    j1 = j + l;
-    j2 = j1 + l;
-    j3 = j2 + l;
-    x0r = a[j] + a[j1];
-    x0i = a[j + 1] + a[j1 + 1];
-    x1r = a[j] - a[j1];
-    x1i = a[j + 1] - a[j1 + 1];
-    x2r = a[j2] + a[j3];
+  for (j0 = m; j0 < l + m; j0 += 2) {
+    j1 = j0 +  8;
+    j2 = j0 + 16;
+    j3 = j0 + 24;
+    x0r = a[j0 + 0] + a[j1 + 0];
+    x0i = a[j0 + 1] + a[j1 + 1];
+    x1r = a[j0 + 0] - a[j1 + 0];
+    x1i = a[j0 + 1] - a[j1 + 1];
+    x2r = a[j2 + 0] + a[j3 + 0];
     x2i = a[j2 + 1] + a[j3 + 1];
-    x3r = a[j2] - a[j3];
+    x3r = a[j2 + 0] - a[j3 + 0];
     x3i = a[j2 + 1] - a[j3 + 1];
-    a[j] = x0r + x2r;
-    a[j + 1] = x0i + x2i;
-    a[j2] = x2i - x0i;
+    a[j0 + 0] = x0r + x2r;
+    a[j0 + 1] = x0i + x2i;
+    a[j2 + 0] = x2i - x0i;
     a[j2 + 1] = x0r - x2r;
     x0r = x1r - x3i;
     x0i = x1i + x3r;
-    a[j1] = wk1r * (x0r - x0i);
+    a[j1 + 0] = wk1r * (x0r - x0i);
     a[j1 + 1] = wk1r * (x0r + x0i);
     x0r = x3i + x1r;
     x0i = x3r - x1i;
-    a[j3] = wk1r * (x0i - x0r);
+    a[j3 + 0] = wk1r * (x0i - x0r);
     a[j3 + 1] = wk1r * (x0i + x0r);
   }
   k1 = 0;
@@ -305,68 +371,68 @@
   for (k = m2; k < n; k += m2) {
     k1 += 2;
     k2 = 2 * k1;
-    wk2r = rdft_w[k1];
+    wk2r = rdft_w[k1 + 0];
     wk2i = rdft_w[k1 + 1];
-    wk1r = rdft_w[k2];
+    wk1r = rdft_w[k2 + 0];
     wk1i = rdft_w[k2 + 1];
-    wk3r = wk1r - 2 * wk2i * wk1i;
-    wk3i = 2 * wk2i * wk1r - wk1i;
-    for (j = k; j < l + k; j += 2) {
-      j1 = j + l;
-      j2 = j1 + l;
-      j3 = j2 + l;
-      x0r = a[j] + a[j1];
-      x0i = a[j + 1] + a[j1 + 1];
-      x1r = a[j] - a[j1];
-      x1i = a[j + 1] - a[j1 + 1];
-      x2r = a[j2] + a[j3];
+    wk3r = rdft_wk3ri_first[k1 + 0];
+    wk3i = rdft_wk3ri_first[k1 + 1];
+    for (j0 = k; j0 < l + k; j0 += 2) {
+      j1 = j0 +  8;
+      j2 = j0 + 16;
+      j3 = j0 + 24;
+      x0r = a[j0 + 0] + a[j1 + 0];
+      x0i = a[j0 + 1] + a[j1 + 1];
+      x1r = a[j0 + 0] - a[j1 + 0];
+      x1i = a[j0 + 1] - a[j1 + 1];
+      x2r = a[j2 + 0] + a[j3 + 0];
       x2i = a[j2 + 1] + a[j3 + 1];
-      x3r = a[j2] - a[j3];
+      x3r = a[j2 + 0] - a[j3 + 0];
       x3i = a[j2 + 1] - a[j3 + 1];
-      a[j] = x0r + x2r;
-      a[j + 1] = x0i + x2i;
+      a[j0 + 0] = x0r + x2r;
+      a[j0 + 1] = x0i + x2i;
       x0r -= x2r;
       x0i -= x2i;
-      a[j2] = wk2r * x0r - wk2i * x0i;
+      a[j2 + 0] = wk2r * x0r - wk2i * x0i;
       a[j2 + 1] = wk2r * x0i + wk2i * x0r;
       x0r = x1r - x3i;
       x0i = x1i + x3r;
-      a[j1] = wk1r * x0r - wk1i * x0i;
+      a[j1 + 0] = wk1r * x0r - wk1i * x0i;
       a[j1 + 1] = wk1r * x0i + wk1i * x0r;
       x0r = x1r + x3i;
       x0i = x1i - x3r;
-      a[j3] = wk3r * x0r - wk3i * x0i;
+      a[j3 + 0] = wk3r * x0r - wk3i * x0i;
       a[j3 + 1] = wk3r * x0i + wk3i * x0r;
     }
     wk1r = rdft_w[k2 + 2];
     wk1i = rdft_w[k2 + 3];
-    wk3r = wk1r - 2 * wk2r * wk1i;
-    wk3i = 2 * wk2r * wk1r - wk1i;
-    for (j = k + m; j < l + (k + m); j += 2) {
-      j1 = j + l;
-      j2 = j1 + l;
-      j3 = j2 + l;
-      x0r = a[j] + a[j1];
-      x0i = a[j + 1] + a[j1 + 1];
-      x1r = a[j] - a[j1];
-      x1i = a[j + 1] - a[j1 + 1];
-      x2r = a[j2] + a[j3];
+    wk3r = rdft_wk3ri_second[k1 + 0];
+    wk3i = rdft_wk3ri_second[k1 + 1];
+    for (j0 = k + m; j0 < l + (k + m); j0 += 2) {
+      j1 = j0 +  8;
+      j2 = j0 + 16;
+      j3 = j0 + 24;
+      x0r = a[j0 + 0] + a[j1 + 0];
+      x0i = a[j0 + 1] + a[j1 + 1];
+      x1r = a[j0 + 0] - a[j1 + 0];
+      x1i = a[j0 + 1] - a[j1 + 1];
+      x2r = a[j2 + 0] + a[j3 + 0];
       x2i = a[j2 + 1] + a[j3 + 1];
-      x3r = a[j2] - a[j3];
+      x3r = a[j2 + 0] - a[j3 + 0];
       x3i = a[j2 + 1] - a[j3 + 1];
-      a[j] = x0r + x2r;
-      a[j + 1] = x0i + x2i;
+      a[j0 + 0] = x0r + x2r;
+      a[j0 + 1] = x0i + x2i;
       x0r -= x2r;
       x0i -= x2i;
-      a[j2] = -wk2i * x0r - wk2r * x0i;
+      a[j2 + 0] = -wk2i * x0r - wk2r * x0i;
       a[j2 + 1] = -wk2i * x0i + wk2r * x0r;
       x0r = x1r - x3i;
       x0i = x1i + x3r;
-      a[j1] = wk1r * x0r - wk1i * x0i;
+      a[j1 + 0] = wk1r * x0r - wk1i * x0i;
       a[j1 + 1] = wk1r * x0i + wk1i * x0r;
       x0r = x1r + x3i;
       x0i = x1i - x3r;
-      a[j3] = wk3r * x0r - wk3i * x0i;
+      a[j3 + 0] = wk3r * x0r - wk3i * x0i;
       a[j3 + 1] = wk3r * x0i + wk3i * x0r;
     }
   }
@@ -377,7 +443,7 @@
   float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
   cft1st_128(a);
-  cftmdl_128(8, a);
+  cftmdl_128(a);
   l = 32;
   for (j = 0; j < l; j += 2) {
     j1 = j + l;
@@ -407,7 +473,7 @@
   float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
   cft1st_128(a);
-  cftmdl_128(8, a);
+  cftmdl_128(a);
   l = 32;
 
   for (j = 0; j < l; j += 2) {
@@ -479,10 +545,8 @@
 
 void aec_rdft_forward_128(float *a) {
   const int n = 128;
-  int nw;
   float xi;
 
-  nw = ip[0];
   bitrv2_32or128(n, ip + 2, a);
   cftfsub_128(a);
   rftfsub_128(a);
@@ -493,10 +557,7 @@
 
 void aec_rdft_inverse_128(float *a) {
   const int n = 128;
-  int nw;
-  float xi;
 
-  nw = ip[0];
   a[1] = 0.5f * (a[0] - a[1]);
   a[0] -= a[1];
   rftbsub_128(a);
@@ -505,14 +566,18 @@
 }
 
 // code path selection
+rft_sub_128_t cft1st_128;
+rft_sub_128_t cftmdl_128;
 rft_sub_128_t rftfsub_128;
 rft_sub_128_t rftbsub_128;
 
 void aec_rdft_init(void) {
+  cft1st_128 = cft1st_128_C;
+  cftmdl_128 = cftmdl_128_C;
   rftfsub_128 = rftfsub_128_C;
   rftbsub_128 = rftbsub_128_C;
   if (WebRtc_GetCPUInfo(kSSE2)) {
-#if defined(__SSE2__)
+#if defined(WEBRTC_USE_SSE2)
     aec_rdft_init_sse2();
 #endif
   }
diff --git a/src/modules/audio_processing/aec/aec_rdft.h b/src/modules/audio_processing/aec/aec_rdft.h
new file mode 100644
index 0000000..91bedc9
--- /dev/null
+++ b/src/modules/audio_processing/aec/aec_rdft.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
+
+// These intrinsics were unavailable before VS 2008.
+// TODO(andrew): move to a common file.
+#if defined(_MSC_VER) && _MSC_VER < 1500
+#include <emmintrin.h>
+static __inline __m128 _mm_castsi128_ps(__m128i a) { return *(__m128*)&a; }
+static __inline __m128i _mm_castps_si128(__m128 a) { return *(__m128i*)&a; }
+#endif
+
+#ifdef _MSC_VER /* visual c++ */
+# define ALIGN16_BEG __declspec(align(16))
+# define ALIGN16_END
+#else /* gcc or icc */
+# define ALIGN16_BEG
+# define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+// constants shared by all paths (C, SSE2).
+extern float rdft_w[64];
+// constants used by the C path.
+extern float rdft_wk3ri_first[32];
+extern float rdft_wk3ri_second[32];
+// constants used by SSE2 but initialized in C path.
+extern float rdft_wk1r[32];
+extern float rdft_wk2r[32];
+extern float rdft_wk3r[32];
+extern float rdft_wk1i[32];
+extern float rdft_wk2i[32];
+extern float rdft_wk3i[32];
+extern float cftmdl_wk1r[4];
+
+// code path selection function pointers
+typedef void (*rft_sub_128_t)(float *a);
+extern rft_sub_128_t rftfsub_128;
+extern rft_sub_128_t rftbsub_128;
+extern rft_sub_128_t cft1st_128;
+extern rft_sub_128_t cftmdl_128;
+
+// entry points
+void aec_rdft_init(void);
+void aec_rdft_init_sse2(void);
+void aec_rdft_forward_128(float *a);
+void aec_rdft_inverse_128(float *a);
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
diff --git a/src/modules/audio_processing/aec/aec_rdft_sse2.c b/src/modules/audio_processing/aec/aec_rdft_sse2.c
new file mode 100644
index 0000000..f936e2a
--- /dev/null
+++ b/src/modules/audio_processing/aec/aec_rdft_sse2.c
@@ -0,0 +1,431 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "typedefs.h"
+
+#if defined(WEBRTC_USE_SSE2)
+#include <emmintrin.h>
+
+#include "aec_rdft.h"
+
+static const ALIGN16_BEG float ALIGN16_END k_swap_sign[4] =
+  {-1.f, 1.f, -1.f, 1.f};
+
+static void cft1st_128_SSE2(float *a) {
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+          __m128 a00v   = _mm_loadu_ps(&a[j +  0]);
+          __m128 a04v   = _mm_loadu_ps(&a[j +  4]);
+          __m128 a08v   = _mm_loadu_ps(&a[j +  8]);
+          __m128 a12v   = _mm_loadu_ps(&a[j + 12]);
+          __m128 a01v   = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1 ,0));
+          __m128 a23v   = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3 ,2));
+          __m128 a45v   = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1 ,0));
+          __m128 a67v   = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3 ,2));
+
+    const __m128 wk1rv  = _mm_load_ps(&rdft_wk1r[k2]);
+    const __m128 wk1iv  = _mm_load_ps(&rdft_wk1i[k2]);
+    const __m128 wk2rv  = _mm_load_ps(&rdft_wk2r[k2]);
+    const __m128 wk2iv  = _mm_load_ps(&rdft_wk2i[k2]);
+    const __m128 wk3rv  = _mm_load_ps(&rdft_wk3r[k2]);
+    const __m128 wk3iv  = _mm_load_ps(&rdft_wk3i[k2]);
+          __m128 x0v    = _mm_add_ps(a01v, a23v);
+    const __m128 x1v    = _mm_sub_ps(a01v, a23v);
+    const __m128 x2v    = _mm_add_ps(a45v, a67v);
+    const __m128 x3v    = _mm_sub_ps(a45v, a67v);
+          __m128 x0w;
+                 a01v   = _mm_add_ps(x0v, x2v);
+                 x0v    = _mm_sub_ps(x0v, x2v);
+                 x0w    = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0 ,1));
+    {
+      const __m128 a45_0v = _mm_mul_ps(wk2rv, x0v);
+      const __m128 a45_1v = _mm_mul_ps(wk2iv, x0w);
+                   a45v   = _mm_add_ps(a45_0v, a45_1v);
+    }
+    {
+            __m128 a23_0v, a23_1v;
+      const __m128 x3w    = _mm_shuffle_ps(x3v, x3v, _MM_SHUFFLE(2, 3, 0 ,1));
+      const __m128 x3s    = _mm_mul_ps(mm_swap_sign, x3w);
+                   x0v    = _mm_add_ps(x1v, x3s);
+                   x0w    = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0 ,1));
+                   a23_0v = _mm_mul_ps(wk1rv, x0v);
+                   a23_1v = _mm_mul_ps(wk1iv, x0w);
+                   a23v   = _mm_add_ps(a23_0v, a23_1v);
+
+                   x0v    = _mm_sub_ps(x1v, x3s);
+                   x0w    = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0 ,1));
+    }
+    {
+      const __m128 a67_0v = _mm_mul_ps(wk3rv, x0v);
+      const __m128 a67_1v = _mm_mul_ps(wk3iv, x0w);
+                   a67v   = _mm_add_ps(a67_0v, a67_1v);
+    }
+
+                 a00v   = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(1, 0, 1 ,0));
+                 a04v   = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(1, 0, 1 ,0));
+                 a08v   = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(3, 2, 3 ,2));
+                 a12v   = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(3, 2, 3 ,2));
+    _mm_storeu_ps(&a[j +  0], a00v);
+    _mm_storeu_ps(&a[j +  4], a04v);
+    _mm_storeu_ps(&a[j +  8], a08v);
+    _mm_storeu_ps(&a[j + 12], a12v);
+  }
+}
+
+static void cftmdl_128_SSE2(float *a) {
+  const int l = 8;
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j0;
+
+  __m128 wk1rv = _mm_load_ps(cftmdl_wk1r);
+  for (j0 = 0; j0 < l; j0 += 2) {
+    const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 +  0]);
+    const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 +  8]);
+    const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
+    const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
+    const __m128  a_00_32 = _mm_shuffle_ps(_mm_castsi128_ps(a_00),
+                                           _mm_castsi128_ps(a_32),
+                                           _MM_SHUFFLE(1, 0, 1 ,0));
+    const __m128  a_08_40 = _mm_shuffle_ps(_mm_castsi128_ps(a_08),
+                                           _mm_castsi128_ps(a_40),
+                                           _MM_SHUFFLE(1, 0, 1 ,0));
+          __m128  x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
+    const __m128  x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);
+
+    const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
+    const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
+    const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
+    const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
+    const __m128  a_16_48 = _mm_shuffle_ps(_mm_castsi128_ps(a_16),
+                                           _mm_castsi128_ps(a_48),
+                                           _MM_SHUFFLE(1, 0, 1 ,0));
+    const __m128  a_24_56 = _mm_shuffle_ps(_mm_castsi128_ps(a_24),
+                                           _mm_castsi128_ps(a_56),
+                                           _MM_SHUFFLE(1, 0, 1 ,0));
+    const __m128  x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
+    const __m128  x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);
+
+    const __m128  xx0 = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+    const __m128  xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+
+    const __m128  x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(
+        _mm_shuffle_epi32(_mm_castps_si128(x3r0_3i0_3r1_x3i1),
+                          _MM_SHUFFLE(2, 3, 0, 1)));
+    const __m128  x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
+    const __m128  x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+    const __m128  x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+
+    const __m128 yy0 = _mm_shuffle_ps(x1_x3_add, x1_x3_sub,
+                                      _MM_SHUFFLE(2, 2, 2 ,2));
+    const __m128 yy1 = _mm_shuffle_ps(x1_x3_add, x1_x3_sub,
+                                      _MM_SHUFFLE(3, 3, 3 ,3));
+    const __m128 yy2 = _mm_mul_ps(mm_swap_sign, yy1);
+    const __m128 yy3 = _mm_add_ps(yy0, yy2);
+    const __m128 yy4 = _mm_mul_ps(wk1rv, yy3);
+
+    _mm_storel_epi64((__m128i*)&a[j0 +  0], _mm_castps_si128(xx0));
+    _mm_storel_epi64((__m128i*)&a[j0 + 32],
+                     _mm_shuffle_epi32(_mm_castps_si128(xx0),
+                                       _MM_SHUFFLE(3, 2, 3, 2)));
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx1));
+    _mm_storel_epi64((__m128i*)&a[j0 + 48],
+                     _mm_shuffle_epi32(_mm_castps_si128(xx1),
+                                       _MM_SHUFFLE(2, 3, 2, 3)));
+    a[j0 + 48] = -a[j0 + 48];
+
+    _mm_storel_epi64((__m128i*)&a[j0 +  8], _mm_castps_si128(x1_x3_add));
+    _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(x1_x3_sub));
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 40], _mm_castps_si128(yy4));
+    _mm_storel_epi64((__m128i*)&a[j0 + 56],
+                     _mm_shuffle_epi32(_mm_castps_si128(yy4),
+                     _MM_SHUFFLE(2, 3, 2, 3)));
+  }
+
+  {
+    int k = 64;
+    int k1 = 2;
+    int k2 = 2 * k1;
+    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2+0]);
+    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2+0]);
+    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2+0]);
+    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2+0]);
+    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2+0]);
+                 wk1rv = _mm_load_ps(&rdft_wk1r[k2+0]);
+    for (j0 = k; j0 < l + k; j0 += 2) {
+      const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 +  0]);
+      const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 +  8]);
+      const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
+      const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
+      const __m128 a_00_32 = _mm_shuffle_ps(_mm_castsi128_ps(a_00),
+                                            _mm_castsi128_ps(a_32),
+                                            _MM_SHUFFLE(1, 0, 1 ,0));
+      const __m128 a_08_40 = _mm_shuffle_ps(_mm_castsi128_ps(a_08),
+                                            _mm_castsi128_ps(a_40),
+                                            _MM_SHUFFLE(1, 0, 1 ,0));
+            __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
+      const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);
+
+      const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
+      const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
+      const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
+      const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
+      const __m128 a_16_48 = _mm_shuffle_ps(_mm_castsi128_ps(a_16),
+                                            _mm_castsi128_ps(a_48),
+                                            _MM_SHUFFLE(1, 0, 1 ,0));
+      const __m128 a_24_56 = _mm_shuffle_ps(_mm_castsi128_ps(a_24),
+                                            _mm_castsi128_ps(a_56),
+                                            _MM_SHUFFLE(1, 0, 1 ,0));
+      const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
+      const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);
+
+      const __m128 xx = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const __m128 xx2 = _mm_mul_ps(xx1 , wk2rv);
+      const __m128 xx3 = _mm_mul_ps(wk2iv,
+          _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xx1),
+                                             _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx4 = _mm_add_ps(xx2, xx3);
+
+      const __m128  x3i0_3r0_3i1_x3r1 =  _mm_castsi128_ps(
+          _mm_shuffle_epi32(_mm_castps_si128(x3r0_3i0_3r1_x3i1),
+                            _MM_SHUFFLE(2, 3, 0, 1)));
+      const __m128  x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
+      const __m128  x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+      const __m128  x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+
+      const __m128 xx10 = _mm_mul_ps(x1_x3_add, wk1rv);
+      const __m128 xx11 = _mm_mul_ps(wk1iv,
+          _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_add),
+                                             _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx12 = _mm_add_ps(xx10, xx11);
+
+      const __m128 xx20 = _mm_mul_ps(x1_x3_sub, wk3rv);
+      const __m128 xx21 = _mm_mul_ps(wk3iv,
+          _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_sub),
+                           _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx22 = _mm_add_ps(xx20, xx21);
+
+      _mm_storel_epi64((__m128i*)&a[j0 +  0], _mm_castps_si128(xx));
+      _mm_storel_epi64((__m128i*)&a[j0 + 32],
+                         _mm_shuffle_epi32(_mm_castps_si128(xx),
+                                           _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx4));
+      _mm_storel_epi64((__m128i*)&a[j0 + 48],
+                        _mm_shuffle_epi32(_mm_castps_si128(xx4),
+                                          _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 +  8], _mm_castps_si128(xx12));
+      _mm_storel_epi64((__m128i*)&a[j0 + 40],
+                       _mm_shuffle_epi32(_mm_castps_si128(xx12),
+                                         _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(xx22));
+      _mm_storel_epi64((__m128i*)&a[j0 + 56],
+                       _mm_shuffle_epi32(_mm_castps_si128(xx22),
+                                         _MM_SHUFFLE(3, 2, 3, 2)));
+    }
+  }
+}
+
+static void rftfsub_128_SSE2(float *a) {
+  const float *c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  static const ALIGN16_BEG float ALIGN16_END k_half[4] =
+    {0.5f, 0.5f, 0.5f, 0.5f};
+  const __m128 mm_half = _mm_load_ps(k_half);
+
+  // Vectorized code (four at once).
+  //    Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const __m128 c_j1 = _mm_loadu_ps(&c[     j1]);         //  1,  2,  3,  4,
+    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);         // 28, 29, 30, 31,
+    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);         // 28, 29, 30, 31,
+    const __m128 wkr_ =
+      _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28,
+    const __m128 wki_ = c_j1;                              //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    const __m128 a_j2_0 = _mm_loadu_ps(&a[0   + j2]);  //   2,   3,   4,   5,
+    const __m128 a_j2_4 = _mm_loadu_ps(&a[4   + j2]);  //   6,   7,   8,   9,
+    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
+    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
+    const __m128 a_j2_p0 = _mm_shuffle_ps(a_j2_0, a_j2_4,
+                            _MM_SHUFFLE(2, 0, 2 ,0));  //   2,   4,   6,   8,
+    const __m128 a_j2_p1 = _mm_shuffle_ps(a_j2_0, a_j2_4,
+                            _MM_SHUFFLE(3, 1, 3 ,1));  //   3,   5,   7,   9,
+    const __m128 a_k2_p0 = _mm_shuffle_ps(a_k2_4, a_k2_0,
+                            _MM_SHUFFLE(0, 2, 0 ,2));  // 126, 124, 122, 120,
+    const __m128 a_k2_p1 = _mm_shuffle_ps(a_k2_4, a_k2_0,
+                            _MM_SHUFFLE(1, 3, 1 ,3));  // 127, 125, 123, 121,
+    // Calculate 'x'.
+    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
+                                               // 2-126, 4-124, 6-122, 8-120,
+    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
+                                               // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr - wki * xi;
+    //    yi = wkr * xi + wki * xr;
+    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
+    const __m128 b_ = _mm_mul_ps(wki_, xi_);
+    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
+    const __m128 d_ = _mm_mul_ps(wki_, xr_);
+    const __m128 yr_ = _mm_sub_ps(a_, b_);     // 2-126, 4-124, 6-122, 8-120,
+    const __m128 yi_ = _mm_add_ps(c_, d_);     // 3-127, 5-125, 7-123, 9-121,
+    // Update 'a'.
+    //    a[j2 + 0] -= yr;
+    //    a[j2 + 1] -= yi;
+    //    a[k2 + 0] += yr;
+    //    a[k2 + 1] -= yi;
+    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
+    const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_);  //   3,   5,   7,   9,
+    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
+    const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_);  // 127, 125, 123, 121,
+    // Shuffle in right order and store.
+    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
+                                                       //   2,   3,   4,   5,
+    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
+                                                       //   6,   7,   8,   9,
+    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
+                                                       // 122, 123, 120, 121,
+    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
+                                                       // 126, 127, 124, 125,
+    const __m128 a_k2_0n = _mm_shuffle_ps(a_k2_0nt, a_k2_0nt,
+                            _MM_SHUFFLE(1, 0, 3 ,2));  // 120, 121, 122, 123,
+    const __m128 a_k2_4n = _mm_shuffle_ps(a_k2_4nt, a_k2_4nt,
+                            _MM_SHUFFLE(1, 0, 3 ,2));  // 124, 125, 126, 127,
+    _mm_storeu_ps(&a[0   + j2], a_j2_0n);
+    _mm_storeu_ps(&a[4   + j2], a_j2_4n);
+    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
+    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
+  }
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 =  32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr - wki * xi;
+    yi = wkr * xi + wki * xr;
+    a[j2 + 0] -= yr;
+    a[j2 + 1] -= yi;
+    a[k2 + 0] += yr;
+    a[k2 + 1] -= yi;
+  }
+}
+
+// SSE2 implementation of the rftbsub_128 stage of the 128-point real FFT
+// (inverse-transform post-processing).  Four (j1, j2) index pairs are
+// processed per iteration with SSE intrinsics, and the remainder is
+// finished by the scalar code path, which mirrors the vector math exactly.
+// |a| is the in-place 128-float data buffer; |c| points into the
+// precomputed twiddle-factor table rdft_w.
+static void rftbsub_128_SSE2(float *a) {
+  const float *c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  static const ALIGN16_BEG float ALIGN16_END k_half[4] =
+    {0.5f, 0.5f, 0.5f, 0.5f};
+  const __m128 mm_half = _mm_load_ps(k_half);
+
+  a[1] = -a[1];
+  // Vectorized code (four at once).
+  //    Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const __m128 c_j1 = _mm_loadu_ps(&c[     j1]);         //  1,  2,  3,  4,
+    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);         // 28, 29, 30, 31,
+    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);         // 28, 29, 30, 31,
+    const __m128 wkr_ =
+      _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28,
+    const __m128 wki_ = c_j1;                              //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    const __m128 a_j2_0 = _mm_loadu_ps(&a[0   + j2]);  //   2,   3,   4,   5,
+    const __m128 a_j2_4 = _mm_loadu_ps(&a[4   + j2]);  //   6,   7,   8,   9,
+    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
+    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
+    const __m128 a_j2_p0 = _mm_shuffle_ps(a_j2_0, a_j2_4,
+                            _MM_SHUFFLE(2, 0, 2 ,0));  //   2,   4,   6,   8,
+    const __m128 a_j2_p1 = _mm_shuffle_ps(a_j2_0, a_j2_4,
+                            _MM_SHUFFLE(3, 1, 3 ,1));  //   3,   5,   7,   9,
+    const __m128 a_k2_p0 = _mm_shuffle_ps(a_k2_4, a_k2_0,
+                            _MM_SHUFFLE(0, 2, 0 ,2));  // 126, 124, 122, 120,
+    const __m128 a_k2_p1 = _mm_shuffle_ps(a_k2_4, a_k2_0,
+                            _MM_SHUFFLE(1, 3, 1 ,3));  // 127, 125, 123, 121,
+    // Calculate 'x'.
+    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
+                                               // 2-126, 4-124, 6-122, 8-120,
+    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
+                                               // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr + wki * xi;
+    //    yi = wkr * xi - wki * xr;
+    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
+    const __m128 b_ = _mm_mul_ps(wki_, xi_);
+    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
+    const __m128 d_ = _mm_mul_ps(wki_, xr_);
+    const __m128 yr_ = _mm_add_ps(a_, b_);     // 2-126, 4-124, 6-122, 8-120,
+    const __m128 yi_ = _mm_sub_ps(c_, d_);     // 3-127, 5-125, 7-123, 9-121,
+    // Update 'a'.
+    //    a[j2 + 0] = a[j2 + 0] - yr;
+    //    a[j2 + 1] = yi - a[j2 + 1];
+    //    a[k2 + 0] = yr + a[k2 + 0];
+    //    a[k2 + 1] = yi - a[k2 + 1];
+    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
+    const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1);  //   3,   5,   7,   9,
+    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
+    const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1);  // 127, 125, 123, 121,
+    // Shuffle in right order and store.
+    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
+                                                       //   2,   3,   4,   5,
+    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
+                                                       //   6,   7,   8,   9,
+    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
+                                                       // 122, 123, 120, 121,
+    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
+                                                       // 126, 127, 124, 125,
+    const __m128 a_k2_0n = _mm_shuffle_ps(a_k2_0nt, a_k2_0nt,
+                            _MM_SHUFFLE(1, 0, 3 ,2));  // 120, 121, 122, 123,
+    const __m128 a_k2_4n = _mm_shuffle_ps(a_k2_4nt, a_k2_4nt,
+                            _MM_SHUFFLE(1, 0, 3 ,2));  // 124, 125, 126, 127,
+    _mm_storeu_ps(&a[0   + j2], a_j2_0n);
+    _mm_storeu_ps(&a[4   + j2], a_j2_4n);
+    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
+    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
+  }
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 =  32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr + wki * xi;
+    yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+
+// Installs the SSE2 implementations into the rdft dispatch function
+// pointers.  Presumably called once at init time when SSE2 support has
+// been detected (the runtime check is outside this file -- confirm).
+void aec_rdft_init_sse2(void) {
+  cft1st_128 = cft1st_128_SSE2;
+  cftmdl_128 = cftmdl_128_SSE2;
+  rftfsub_128 = rftfsub_128_SSE2;
+  rftbsub_128 = rftbsub_128_SSE2;
+}
+
+#endif  // WEBRTC_USE_SSE2
diff --git a/src/modules/audio_processing/aec/main/source/resampler.c b/src/modules/audio_processing/aec/aec_resampler.c
similarity index 94%
rename from src/modules/audio_processing/aec/main/source/resampler.c
rename to src/modules/audio_processing/aec/aec_resampler.c
index 4caa6f4..ea980cd 100644
--- a/src/modules/audio_processing/aec/main/source/resampler.c
+++ b/src/modules/audio_processing/aec/aec_resampler.c
@@ -12,19 +12,19 @@
  * skew by resampling the farend signal.
  */
 
+#include "aec_resampler.h"
+
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
 #include <math.h>
 
-#include "resampler.h"
 #include "aec_core.h"
 
-enum { kFrameBufferSize = FRAME_LEN * 4 };
 enum { kEstimateLengthFrames = 400 };
 
 typedef struct {
-    short buffer[kFrameBufferSize];
+    short buffer[kResamplerBufferSize];
     float position;
 
     int deviceSampleRateHz;
@@ -127,7 +127,7 @@
     // Shift buffer
     memmove(obj->buffer,
             &obj->buffer[size],
-            (kFrameBufferSize - size) * sizeof(short));
+            (kResamplerBufferSize - size) * sizeof(short));
 
     return outsize;
 }
@@ -157,8 +157,8 @@
 }
 
 int EstimateSkew(const int* rawSkew,
-                 const int size,
-                 const int deviceSampleRateHz,
+                 int size,
+                 int deviceSampleRateHz,
                  float *skewEst)
 {
     const int absLimitOuter = (int)(0.04f * deviceSampleRateHz);
@@ -176,7 +176,6 @@
     float y = 0;
     float xy = 0;
     float xAvg = 0;
-    float yAvg = 0;
     float denom = 0;
     float skew = 0;
 
@@ -223,7 +222,6 @@
     }
     assert(n > 0);
     xAvg = x / n;
-    yAvg = y / n;
     denom = x2 - xAvg*x;
 
     if (denom != 0) {
diff --git a/src/modules/audio_processing/aec/main/source/resampler.h b/src/modules/audio_processing/aec/aec_resampler.h
similarity index 81%
rename from src/modules/audio_processing/aec/main/source/resampler.h
rename to src/modules/audio_processing/aec/aec_resampler.h
index 9cb2837..ab4cc6e 100644
--- a/src/modules/audio_processing/aec/main/source/resampler.h
+++ b/src/modules/audio_processing/aec/aec_resampler.h
@@ -8,10 +8,13 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_RESAMPLER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_RESAMPLER_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
+
+#include "aec_core.h"
 
 enum { kResamplingDelay = 1 };
+enum { kResamplerBufferSize = FRAME_LEN * 4 };
 
 // Unless otherwise specified, functions return 0 on success and -1 on error
 int WebRtcAec_CreateResampler(void **resampInst);
@@ -29,4 +32,4 @@
                              float skew,
                              short *outspeech);
 
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_RESAMPLER_H_
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
diff --git a/src/modules/audio_processing/aec/echo_cancellation.c b/src/modules/audio_processing/aec/echo_cancellation.c
new file mode 100644
index 0000000..66c9b97
--- /dev/null
+++ b/src/modules/audio_processing/aec/echo_cancellation.c
@@ -0,0 +1,940 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains the API functions for the AEC.
+ */
+#include "echo_cancellation.h"
+
+#include <math.h>
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+
+#include "aec_core.h"
+#include "aec_resampler.h"
+#include "ring_buffer.h"
+#include "typedefs.h"
+
+// Maximum length of resampled signal. Must be an integer multiple of frames
+// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN
+// The factor of 2 handles wb, and the + 1 is as a safety margin
+// TODO(bjornv): Replace with kResamplerBufferSize
+#define MAX_RESAMP_LEN (5 * FRAME_LEN)
+
+static const int kMaxBufSizeStart = 62;  // In partitions
+static const int sampMsNb = 8; // samples per ms in nb
+// Target suppression levels for nlp modes
+// log{0.001, 0.00001, 0.00000001}
+static const float targetSupp[3] = {-6.9f, -11.5f, -18.4f};
+static const float minOverDrive[3] = {1.0f, 2.0f, 5.0f};
+static const int initCheck = 42;
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+static int instance_count = 0;
+#endif
+
+// Top-level (wrapper) state for one AEC instance.  Holds the core
+// canceller (aec_t), the skew resampler, the far-end pre-buffer, and the
+// startup/delay bookkeeping used by WebRtcAec_Process().
+typedef struct {
+    int delayCtr;
+    int sampFreq;       // Input sample rate in Hz: 8000, 16000 or 32000.
+    int splitSampFreq;  // Band rate: 16000 when sampFreq is 32000, else sampFreq.
+    int scSampFreq;     // Sound-card (device) sample rate in Hz, [1, 96000].
+    float sampFactor; // scSampRate / sampFreq
+    short nlpMode;      // kAecNlpConservative/Moderate/Aggressive.
+    short autoOnOff;
+    short activity;
+    short skewMode;     // kAecTrue/kAecFalse: skew compensation enabled.
+    int bufSizeStart;   // Startup far-end buffer size target, in partitions.
+    //short bufResetCtr;  // counts number of noncausal frames
+    int knownDelay;
+
+    short initFlag; // indicates if AEC has been initialized
+
+    // Variables used for averaging far end buffer size
+    short counter;
+    int sum;
+    short firstVal;
+    short checkBufSizeCtr;
+
+    // Variables used for delay shifts
+    short msInSndCardBuf;  // Last reported system buffer size in ms (clamped).
+    short filtDelay;  // Filtered delay estimate.
+    int timeForDelayChange;
+    int ECstartup;       // Nonzero while in the startup (pass-through) phase.
+    int checkBuffSize;   // Nonzero while system-delay stability is verified.
+    short lastDelayDiff;
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    void* far_pre_buf_s16;  // Time domain far-end pre-buffer in int16_t.
+    FILE *bufFile;
+    FILE *delayFile;
+    FILE *skewFile;
+#endif
+
+    // Structures
+    void *resampler;     // Handle from WebRtcAec_CreateResampler().
+
+    int skewFrCtr;
+    int resample; // if the skew is small enough we don't resample
+    int highSkewCtr;
+    float skew;          // Current skew estimate, clamped in Process().
+
+    void* far_pre_buf;  // Time domain far-end pre-buffer.
+
+    int lastError;       // Last AEC_* error code set by the API functions.
+
+    aec_t *aec;          // Core canceller state.
+} aecpc_t;
+
+// Estimates delay to set the position of the far-end buffer read pointer
+// (controlled by knownDelay)
+static int EstBufDelay(aecpc_t *aecInst);
+
+// Allocates a top-level AEC instance and its sub-components: the core
+// canceller, the skew resampler, and the far-end pre-buffer sized for one
+// FFT window (PART_LEN2) plus worst-case drift compensation
+// (kResamplerBufferSize).  On success stores the instance in *aecInst and
+// returns 0; returns -1 on any failure.
+// NOTE(review): the failure paths call WebRtcAec_Free() on a partially
+// initialized instance (e.g. far_pre_buf not yet created), so Free reads
+// uninitialized members -- confirm and consider releasing the created
+// members individually instead.
+WebRtc_Word32 WebRtcAec_Create(void **aecInst)
+{
+    aecpc_t *aecpc;
+    if (aecInst == NULL) {
+        return -1;
+    }
+
+    aecpc = malloc(sizeof(aecpc_t));
+    *aecInst = aecpc;
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (WebRtcAec_CreateAec(&aecpc->aec) == -1) {
+        WebRtcAec_Free(aecpc);
+        aecpc = NULL;
+        return -1;
+    }
+
+    if (WebRtcAec_CreateResampler(&aecpc->resampler) == -1) {
+        WebRtcAec_Free(aecpc);
+        aecpc = NULL;
+        return -1;
+    }
+    // Create far-end pre-buffer. The buffer size has to be large enough for
+    // largest possible drift compensation (kResamplerBufferSize) + "almost" an
+    // FFT buffer (PART_LEN2 - 1).
+    if (WebRtc_CreateBuffer(&aecpc->far_pre_buf,
+                            PART_LEN2 + kResamplerBufferSize,
+                            sizeof(float)) == -1) {
+        WebRtcAec_Free(aecpc);
+        aecpc = NULL;
+        return -1;
+    }
+
+    aecpc->initFlag = 0;
+    aecpc->lastError = 0;
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    if (WebRtc_CreateBuffer(&aecpc->far_pre_buf_s16,
+                            PART_LEN2 + kResamplerBufferSize,
+                            sizeof(int16_t)) == -1) {
+        WebRtcAec_Free(aecpc);
+        aecpc = NULL;
+        return -1;
+    }
+    {
+      // Open per-instance dump files for the far/near/out signals and the
+      // buffer/skew/delay traces.
+      char filename[64];
+      sprintf(filename, "aec_far%d.pcm", instance_count);
+      aecpc->aec->farFile = fopen(filename, "wb");
+      sprintf(filename, "aec_near%d.pcm", instance_count);
+      aecpc->aec->nearFile = fopen(filename, "wb");
+      sprintf(filename, "aec_out%d.pcm", instance_count);
+      aecpc->aec->outFile = fopen(filename, "wb");
+      sprintf(filename, "aec_out_linear%d.pcm", instance_count);
+      aecpc->aec->outLinearFile = fopen(filename, "wb");
+      sprintf(filename, "aec_buf%d.dat", instance_count);
+      aecpc->bufFile = fopen(filename, "wb");
+      sprintf(filename, "aec_skew%d.dat", instance_count);
+      aecpc->skewFile = fopen(filename, "wb");
+      sprintf(filename, "aec_delay%d.dat", instance_count);
+      aecpc->delayFile = fopen(filename, "wb");
+      instance_count++;
+    }
+#endif
+
+    return 0;
+}
+
+// Releases everything owned by an AEC instance: the far-end pre-buffer,
+// the core canceller, the resampler, the debug buffers/files when dump
+// support is compiled in, and the instance struct itself.
+// Returns 0, or -1 if aecInst is NULL.  No NULL checks are performed on
+// the individual members, so the instance is assumed fully constructed.
+WebRtc_Word32 WebRtcAec_Free(void *aecInst)
+{
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    WebRtc_FreeBuffer(aecpc->far_pre_buf);
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    WebRtc_FreeBuffer(aecpc->far_pre_buf_s16);
+    fclose(aecpc->aec->farFile);
+    fclose(aecpc->aec->nearFile);
+    fclose(aecpc->aec->outFile);
+    fclose(aecpc->aec->outLinearFile);
+    fclose(aecpc->bufFile);
+    fclose(aecpc->skewFile);
+    fclose(aecpc->delayFile);
+#endif
+
+    WebRtcAec_FreeAec(aecpc->aec);
+    WebRtcAec_FreeResampler(aecpc->resampler);
+    free(aecpc);
+
+    return 0;
+}
+
+// Initializes an AEC instance for the given sampling rates.
+//   sampFreq:   input/output rate in Hz; must be 8000, 16000 or 32000.
+//   scSampFreq: sound-card (device) rate in Hz; must be in [1, 96000].
+// Resets the core canceller, the resampler and the far-end pre-buffer,
+// clears all startup/delay tracking state, and applies the default
+// configuration (moderate NLP; skew, metrics and delay logging off).
+// Returns 0 on success, -1 on error with lastError set.
+WebRtc_Word32 WebRtcAec_Init(void *aecInst, WebRtc_Word32 sampFreq, WebRtc_Word32 scSampFreq)
+{
+    aecpc_t *aecpc = aecInst;
+    AecConfig aecConfig;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (sampFreq != 8000 && sampFreq != 16000  && sampFreq != 32000) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    aecpc->sampFreq = sampFreq;
+
+    if (scSampFreq < 1 || scSampFreq > 96000) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    aecpc->scSampFreq = scSampFreq;
+
+    // Initialize echo canceller core
+    if (WebRtcAec_InitAec(aecpc->aec, aecpc->sampFreq) == -1) {
+        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
+        return -1;
+    }
+
+    if (WebRtcAec_InitResampler(aecpc->resampler, aecpc->scSampFreq) == -1) {
+        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
+        return -1;
+    }
+
+    if (WebRtc_InitBuffer(aecpc->far_pre_buf) == -1) {
+        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
+        return -1;
+    }
+    WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);  // Start overlap.
+
+    aecpc->initFlag = initCheck;  // indicates that initialization has been done
+
+    // 32 kHz input is processed as two 16 kHz bands.
+    if (aecpc->sampFreq == 32000) {
+        aecpc->splitSampFreq = 16000;
+    }
+    else {
+        aecpc->splitSampFreq = sampFreq;
+    }
+
+    aecpc->skewFrCtr = 0;
+    aecpc->activity = 0;
+
+    aecpc->delayCtr = 0;
+
+    aecpc->sum = 0;
+    aecpc->counter = 0;
+    aecpc->checkBuffSize = 1;
+    aecpc->firstVal = 0;
+
+    aecpc->ECstartup = 1;
+    aecpc->bufSizeStart = 0;
+    aecpc->checkBufSizeCtr = 0;
+    aecpc->filtDelay = 0;
+    aecpc->timeForDelayChange = 0;
+    aecpc->knownDelay = 0;
+    aecpc->lastDelayDiff = 0;
+
+    aecpc->skew = 0;
+    aecpc->resample = kAecFalse;
+    aecpc->highSkewCtr = 0;
+    aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq;
+
+    // Default settings.
+    aecConfig.nlpMode = kAecNlpModerate;
+    aecConfig.skewMode = kAecFalse;
+    aecConfig.metricsMode = kAecFalse;
+    aecConfig.delay_logging = kAecFalse;
+
+    if (WebRtcAec_set_config(aecpc, aecConfig) == -1) {
+        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
+        return -1;
+    }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    if (WebRtc_InitBuffer(aecpc->far_pre_buf_s16) == -1) {
+        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
+        return -1;
+    }
+    WebRtc_MoveReadPtr(aecpc->far_pre_buf_s16, -PART_LEN);  // Start overlap.
+#endif
+
+    return 0;
+}
+
+// only buffer L band for farend
+// Buffers one frame (80 or 160 samples) of L-band far-end audio.
+// When skew compensation is enabled and active, the signal is first
+// resampled (which may change the sample count).  The samples are then
+// converted to float and appended to |far_pre_buf|; every time PART_LEN2
+// samples are available they are handed to the core as one partition, and
+// the read pointer is rewound PART_LEN samples to keep the 50% overlap.
+// Returns 0 on success, -1 on error with lastError set.
+// only buffer L band for farend
+WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, const WebRtc_Word16 *farend,
+    WebRtc_Word16 nrOfSamples)
+{
+    aecpc_t *aecpc = aecInst;
+    WebRtc_Word32 retVal = 0;
+    int newNrOfSamples = (int) nrOfSamples;
+    short newFarend[MAX_RESAMP_LEN];
+    const int16_t* farend_ptr = farend;
+    float tmp_farend[MAX_RESAMP_LEN];
+    const float* farend_float = tmp_farend;
+    float skew;
+    int i = 0;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (farend == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    // number of samples == 160 for SWB input
+    if (nrOfSamples != 80 && nrOfSamples != 160) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+
+    skew = aecpc->skew;
+
+    if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
+        // Resample and get a new number of samples
+        newNrOfSamples = WebRtcAec_ResampleLinear(aecpc->resampler,
+                                                  farend,
+                                                  nrOfSamples,
+                                                  skew,
+                                                  newFarend);
+        farend_ptr = (const int16_t*) newFarend;
+    }
+
+    // Account for the newly buffered samples in the core's delay tracking.
+    aecpc->aec->system_delay += newNrOfSamples;
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    WebRtc_WriteBuffer(aecpc->far_pre_buf_s16, farend_ptr,
+                       (size_t) newNrOfSamples);
+#endif
+    // Cast to float and write the time-domain data to |far_pre_buf|.
+    for (i = 0; i < newNrOfSamples; i++) {
+      tmp_farend[i] = (float) farend_ptr[i];
+    }
+    WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_float,
+                       (size_t) newNrOfSamples);
+
+    // Transform to frequency domain if we have enough data.
+    while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
+      // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
+      WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**) &farend_float, tmp_farend,
+                        PART_LEN2);
+
+      WebRtcAec_BufferFarendPartition(aecpc->aec, farend_float);
+
+      // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
+      WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+      WebRtc_ReadBuffer(aecpc->far_pre_buf_s16, (void**) &farend_ptr, newFarend,
+                        PART_LEN2);
+      WebRtc_WriteBuffer(aecpc->aec->far_time_buf, &farend_ptr[PART_LEN], 1);
+      WebRtc_MoveReadPtr(aecpc->far_pre_buf_s16, -PART_LEN);
+#endif
+    }
+
+    return retVal;
+}
+
+// Runs echo cancellation on one frame of near-end audio.
+//   nearend/nearendH: near-end L band and (for 32 kHz only) H band input.
+//   out/outH:         cancelled output, same band layout as the input.
+//   nrOfSamples:      80 or 160 samples per band.
+//   msInSndCardBuf:   reported sound-card buffering in ms; clamped to
+//                     [0, 500] and then increased by 10 as a margin.
+//   skew:             raw skew measurement, used when skewMode is enabled.
+// During the startup phase (ECstartup) the near-end input is copied
+// through unchanged while the far-end buffer size is being established;
+// once stable, the core canceller is run frame by frame and its output
+// frames are read from the core's output buffers.
+// Returns 0, or -1 with lastError set (bad-parameter cases are warnings:
+// the value is clamped and processing continues).
+// NOTE(review): |outH| is not NULL-checked although it is written when
+// sampFreq == 32000 (|nearendH| is checked above) -- confirm callers
+// always pass a valid pointer at 32 kHz.
+WebRtc_Word32 WebRtcAec_Process(void *aecInst, const WebRtc_Word16 *nearend,
+    const WebRtc_Word16 *nearendH, WebRtc_Word16 *out, WebRtc_Word16 *outH,
+    WebRtc_Word16 nrOfSamples, WebRtc_Word16 msInSndCardBuf, WebRtc_Word32 skew)
+{
+    aecpc_t *aecpc = aecInst;
+    WebRtc_Word32 retVal = 0;
+    short i;
+    short nBlocks10ms;
+    short nFrames;
+    // Limit resampling to doubling/halving of signal
+    const float minSkewEst = -0.5f;
+    const float maxSkewEst = 1.0f;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (nearend == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (out == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    // number of samples == 160 for SWB input
+    if (nrOfSamples != 80 && nrOfSamples != 160) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+
+    // Check for valid pointers based on sampling rate
+    if (aecpc->sampFreq == 32000 && nearendH == NULL) {
+       aecpc->lastError = AEC_NULL_POINTER_ERROR;
+       return -1;
+    }
+
+    if (msInSndCardBuf < 0) {
+        msInSndCardBuf = 0;
+        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
+        retVal = -1;
+    }
+    else if (msInSndCardBuf > 500) {
+        msInSndCardBuf = 500;
+        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
+        retVal = -1;
+    }
+    // TODO(andrew): we need to investigate if this +10 is really wanted.
+    msInSndCardBuf += 10;
+    aecpc->msInSndCardBuf = msInSndCardBuf;
+
+    if (aecpc->skewMode == kAecTrue) {
+        // Skip the first 25 frames, then keep a normalized, clamped skew
+        // estimate for the resampler.
+        if (aecpc->skewFrCtr < 25) {
+            aecpc->skewFrCtr++;
+        }
+        else {
+            retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
+            if (retVal == -1) {
+                aecpc->skew = 0;
+                aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
+            }
+
+            aecpc->skew /= aecpc->sampFactor*nrOfSamples;
+
+            if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) {
+                aecpc->resample = kAecFalse;
+            }
+            else {
+                aecpc->resample = kAecTrue;
+            }
+
+            if (aecpc->skew < minSkewEst) {
+                aecpc->skew = minSkewEst;
+            }
+            else if (aecpc->skew > maxSkewEst) {
+                aecpc->skew = maxSkewEst;
+            }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+            fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
+#endif
+        }
+    }
+
+    nFrames = nrOfSamples / FRAME_LEN;
+    nBlocks10ms = nFrames / aecpc->aec->mult;
+
+    if (aecpc->ECstartup) {
+        if (nearend != out) {
+            // Only needed if they don't already point to the same place.
+            memcpy(out, nearend, sizeof(short) * nrOfSamples);
+        }
+
+        // The AEC is in the start up mode
+        // AEC is disabled until the system delay is OK
+
+        // Mechanism to ensure that the system delay is reasonably stable.
+        if (aecpc->checkBuffSize) {
+            aecpc->checkBufSizeCtr++;
+            // Before we fill up the far-end buffer we require the system delay
+            // to be stable (+/-8 ms) compared to the first value. This
+            // comparison is made during the following 6 consecutive 10 ms
+            // blocks. If it seems to be stable then we start to fill up the
+            // far-end buffer.
+            if (aecpc->counter == 0) {
+                aecpc->firstVal = aecpc->msInSndCardBuf;
+                aecpc->sum = 0;
+            }
+
+            if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) <
+                WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) {
+                aecpc->sum += aecpc->msInSndCardBuf;
+                aecpc->counter++;
+            }
+            else {
+                aecpc->counter = 0;
+            }
+
+            if (aecpc->counter * nBlocks10ms >= 6) {
+                // The far-end buffer size is determined in partitions of
+                // PART_LEN samples. Use 75% of the average value of the system
+                // delay as buffer size to start with.
+                aecpc->bufSizeStart = WEBRTC_SPL_MIN((3 * aecpc->sum *
+                  aecpc->aec->mult * 8) / (4 * aecpc->counter * PART_LEN),
+                  kMaxBufSizeStart);
+                // Buffer size has now been determined.
+                aecpc->checkBuffSize = 0;
+            }
+
+            if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) {
+                // For really bad systems, don't disable the echo canceller for
+                // more than 0.5 sec.
+                aecpc->bufSizeStart = WEBRTC_SPL_MIN((aecpc->msInSndCardBuf *
+                    aecpc->aec->mult * 3) / 40, kMaxBufSizeStart);
+                aecpc->checkBuffSize = 0;
+            }
+        }
+
+        // If |checkBuffSize| changed in the if-statement above.
+        if (!aecpc->checkBuffSize) {
+            // The system delay is now reasonably stable (or has been unstable
+            // for too long). When the far-end buffer is filled with
+            // approximately the same amount of data as reported by the system
+            // we end the startup phase.
+            int overhead_elements = aecpc->aec->system_delay / PART_LEN -
+                aecpc->bufSizeStart;
+            if (overhead_elements == 0) {
+                // Enable the AEC
+                aecpc->ECstartup = 0;
+            } else if (overhead_elements > 0) {
+                WebRtc_MoveReadPtr(aecpc->aec->far_buf_windowed,
+                                   overhead_elements);
+                WebRtc_MoveReadPtr(aecpc->aec->far_buf, overhead_elements);
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+                WebRtc_MoveReadPtr(aecpc->aec->far_time_buf, overhead_elements);
+#endif
+                // TODO(bjornv): Do we need a check on how much we actually
+                // moved the read pointer? It should always be possible to move
+                // the pointer |overhead_elements| since we have only added data
+                // to the buffer and no delay compensation nor AEC processing
+                // has been done.
+                aecpc->aec->system_delay -= overhead_elements * PART_LEN;
+
+                // Enable the AEC
+                aecpc->ECstartup = 0;
+            }
+        }
+    } else {
+        // AEC is enabled.
+
+        int out_elements = 0;
+
+        EstBufDelay(aecpc);
+
+        // Note that 1 frame is supported for NB and 2 frames for WB.
+        for (i = 0; i < nFrames; i++) {
+            int16_t* out_ptr = NULL;
+            int16_t out_tmp[FRAME_LEN];
+
+            // Call the AEC.
+            WebRtcAec_ProcessFrame(aecpc->aec,
+                                   &nearend[FRAME_LEN * i],
+                                   &nearendH[FRAME_LEN * i],
+                                   aecpc->knownDelay);
+            // TODO(bjornv): Re-structure such that we don't have to pass
+            // |aecpc->knownDelay| as input. Change name to something like
+            // |system_buffer_diff|.
+
+            // Stuff the out buffer if we have less than a frame to output.
+            // This should only happen for the first frame.
+            out_elements = (int) WebRtc_available_read(aecpc->aec->outFrBuf);
+            if (out_elements < FRAME_LEN) {
+                WebRtc_MoveReadPtr(aecpc->aec->outFrBuf,
+                                   out_elements - FRAME_LEN);
+                if (aecpc->sampFreq == 32000) {
+                    WebRtc_MoveReadPtr(aecpc->aec->outFrBufH,
+                                       out_elements - FRAME_LEN);
+                }
+            }
+
+            // Obtain an output frame.
+            WebRtc_ReadBuffer(aecpc->aec->outFrBuf, (void**) &out_ptr,
+                              out_tmp, FRAME_LEN);
+            memcpy(&out[FRAME_LEN * i], out_ptr, sizeof(int16_t) * FRAME_LEN);
+            // For H band
+            if (aecpc->sampFreq == 32000) {
+                WebRtc_ReadBuffer(aecpc->aec->outFrBufH, (void**) &out_ptr,
+                                  out_tmp, FRAME_LEN);
+                memcpy(&outH[FRAME_LEN * i], out_ptr,
+                       sizeof(int16_t) * FRAME_LEN);
+            }
+        }
+    }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+    {
+        int16_t far_buf_size_ms = (int16_t) (aecpc->aec->system_delay /
+            (sampMsNb * aecpc->aec->mult));
+        fwrite(&far_buf_size_ms, 2, 1, aecpc->bufFile);
+        fwrite(&(aecpc->knownDelay), sizeof(aecpc->knownDelay), 1, aecpc->delayFile);
+    }
+#endif
+
+    return retVal;
+}
+
+// Applies a configuration to an initialized instance: skew mode, NLP
+// aggressiveness (which also sets the core's target suppression and
+// minimum overdrive), metrics mode (resetting the metrics when enabled)
+// and delay logging (clearing the histogram when enabled).  Each field is
+// validated individually; an invalid value sets lastError to
+// AEC_BAD_PARAMETER_ERROR and returns -1 without applying later fields.
+WebRtc_Word32 WebRtcAec_set_config(void *aecInst, AecConfig config)
+{
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    aecpc->skewMode = config.skewMode;
+
+    if (config.nlpMode != kAecNlpConservative && config.nlpMode !=
+            kAecNlpModerate && config.nlpMode != kAecNlpAggressive) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    aecpc->nlpMode = config.nlpMode;
+    aecpc->aec->targetSupp = targetSupp[aecpc->nlpMode];
+    aecpc->aec->minOverDrive = minOverDrive[aecpc->nlpMode];
+
+    if (config.metricsMode != kAecFalse && config.metricsMode != kAecTrue) {
+        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    aecpc->aec->metricsMode = config.metricsMode;
+    if (aecpc->aec->metricsMode == kAecTrue) {
+        WebRtcAec_InitMetrics(aecpc->aec);
+    }
+
+  if (config.delay_logging != kAecFalse && config.delay_logging != kAecTrue) {
+    aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
+    return -1;
+  }
+  aecpc->aec->delay_logging_enabled = config.delay_logging;
+  if (aecpc->aec->delay_logging_enabled == kAecTrue) {
+    memset(aecpc->aec->delay_histogram, 0, sizeof(aecpc->aec->delay_histogram));
+  }
+
+    return 0;
+}
+
+// Reads back the current configuration (nlpMode, skewMode, metricsMode,
+// delay_logging) into *config.  Returns 0 on success; -1 with lastError
+// set if config is NULL or the instance is uninitialized.
+WebRtc_Word32 WebRtcAec_get_config(void *aecInst, AecConfig *config)
+{
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (config == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    config->nlpMode = aecpc->nlpMode;
+    config->skewMode = aecpc->skewMode;
+    config->metricsMode = aecpc->aec->metricsMode;
+    config->delay_logging = aecpc->aec->delay_logging_enabled;
+
+    return 0;
+}
+
+// Reports the current echo state of the core AEC into |status|.
+//
+// Inputs:  aecInst - AEC instance (must be non-NULL and initialized).
+// Outputs: status  - receives aecpc->aec->echoState (must be non-NULL).
+// Returns 0 on success, -1 on error.
+WebRtc_Word32 WebRtcAec_get_echo_status(void *aecInst, WebRtc_Word16 *status)
+{
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (status == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    *status = aecpc->aec->echoState;
+
+    return 0;
+}
+
+// Fills |metrics| with the current echo metrics: ERL, ERLE, RERL and A_NLP.
+// For each metric the reported average blends the upper-part mean
+// ("himean") with the regular mean, weighted by |upweight|; when the
+// internal averages have not moved past the offsetLevel bound, offsetLevel
+// itself is reported instead (it acts as a "no data" sentinel here).
+// All values are truncated to short on output.
+//
+// Inputs:  aecInst - AEC instance (must be non-NULL and initialized).
+// Outputs: metrics - metrics struct to fill (must be non-NULL).
+// Returns 0 on success, -1 on error.
+WebRtc_Word32 WebRtcAec_GetMetrics(void *aecInst, AecMetrics *metrics)
+{
+    // Weight given to the upper-part mean when mixing with the plain mean.
+    const float upweight = 0.7f;
+    float dtmp;
+    short stmp;
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    if (metrics == NULL) {
+        aecpc->lastError = AEC_NULL_POINTER_ERROR;
+        return -1;
+    }
+
+    if (aecpc->initFlag != initCheck) {
+        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    // ERL
+    metrics->erl.instant = (short) aecpc->aec->erl.instant;
+
+    if ((aecpc->aec->erl.himean > offsetLevel) && (aecpc->aec->erl.average > offsetLevel)) {
+    // Use a mix between regular average and upper part average
+        dtmp = upweight * aecpc->aec->erl.himean + (1 - upweight) * aecpc->aec->erl.average;
+        metrics->erl.average = (short) dtmp;
+    }
+    else {
+        metrics->erl.average = offsetLevel;
+    }
+
+    metrics->erl.max = (short) aecpc->aec->erl.max;
+
+    // Report the min only if it has dropped below -offsetLevel.
+    if (aecpc->aec->erl.min < (offsetLevel * (-1))) {
+        metrics->erl.min = (short) aecpc->aec->erl.min;
+    }
+    else {
+        metrics->erl.min = offsetLevel;
+    }
+
+    // ERLE
+    metrics->erle.instant = (short) aecpc->aec->erle.instant;
+
+    if ((aecpc->aec->erle.himean > offsetLevel) && (aecpc->aec->erle.average > offsetLevel)) {
+        // Use a mix between regular average and upper part average
+        dtmp =  upweight * aecpc->aec->erle.himean + (1 - upweight) * aecpc->aec->erle.average;
+        metrics->erle.average = (short) dtmp;
+    }
+    else {
+        metrics->erle.average = offsetLevel;
+    }
+
+    metrics->erle.max = (short) aecpc->aec->erle.max;
+
+    if (aecpc->aec->erle.min < (offsetLevel * (-1))) {
+        metrics->erle.min = (short) aecpc->aec->erle.min;
+    } else {
+        metrics->erle.min = offsetLevel;
+    }
+
+    // RERL: sum of the ERL and ERLE averages (presumably log-domain
+    // quantities, so the sum is a combined ratio — confirm units).
+    if ((metrics->erl.average > offsetLevel) && (metrics->erle.average > offsetLevel)) {
+        stmp = metrics->erl.average + metrics->erle.average;
+    }
+    else {
+        stmp = offsetLevel;
+    }
+    metrics->rerl.average = stmp;
+
+    // No other statistics needed, but returned for completeness
+    metrics->rerl.instant = stmp;
+    metrics->rerl.max = stmp;
+    metrics->rerl.min = stmp;
+
+    // A_NLP
+    metrics->aNlp.instant = (short) aecpc->aec->aNlp.instant;
+
+    if ((aecpc->aec->aNlp.himean > offsetLevel) && (aecpc->aec->aNlp.average > offsetLevel)) {
+        // Use a mix between regular average and upper part average
+        dtmp =  upweight * aecpc->aec->aNlp.himean + (1 - upweight) * aecpc->aec->aNlp.average;
+        metrics->aNlp.average = (short) dtmp;
+    }
+    else {
+        metrics->aNlp.average = offsetLevel;
+    }
+
+    metrics->aNlp.max = (short) aecpc->aec->aNlp.max;
+
+    if (aecpc->aec->aNlp.min < (offsetLevel * (-1))) {
+        metrics->aNlp.min = (short) aecpc->aec->aNlp.min;
+    }
+    else {
+        metrics->aNlp.min = offsetLevel;
+    }
+
+    return 0;
+}
+
+// Computes delay metrics in milliseconds from the delay histogram gathered
+// since the last call, then resets the histogram.
+//
+// Inputs:  handle - AEC instance (must be initialized, with delay logging
+//                   enabled via WebRtcAec_set_config).
+// Outputs: median - median delay in ms, lookahead-compensated; -1 when no
+//                   delay data was recorded since the last call.
+//          std    - delay spread in ms, computed as an L1 norm (mean
+//                   absolute deviation) around the median; -1 when no
+//                   delay data was recorded.
+// Returns 0 on success, -1 on error (error code in self->lastError).
+int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std) {
+  aecpc_t* self = handle;
+  int i = 0;
+  int delay_values = 0;
+  int num_delay_values = 0;
+  int my_median = 0;
+  int kMsPerBlock = 0;
+  float l1_norm = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (median == NULL) {
+    self->lastError = AEC_NULL_POINTER_ERROR;
+    return -1;
+  }
+  if (std == NULL) {
+    self->lastError = AEC_NULL_POINTER_ERROR;
+    return -1;
+  }
+  if (self->initFlag != initCheck) {
+    self->lastError = AEC_UNINITIALIZED_ERROR;
+    return -1;
+  }
+  if (self->aec->delay_logging_enabled == 0) {
+    // Logging disabled
+    self->lastError = AEC_UNSUPPORTED_FUNCTION_ERROR;
+    return -1;
+  }
+
+  // Milliseconds per histogram block. Computed only after |self| has been
+  // validated; the previous code dereferenced |self| in the initializer
+  // above the NULL check, crashing on a NULL handle.
+  kMsPerBlock = (PART_LEN * 1000) / self->splitSampFreq;
+
+  // Get number of delay values since last update
+  for (i = 0; i < kHistorySizeBlocks; i++) {
+    num_delay_values += self->aec->delay_histogram[i];
+  }
+  if (num_delay_values == 0) {
+    // We have no new delay value data. Even though -1 is a valid estimate, it
+    // will practically never be used since multiples of |kMsPerBlock| will
+    // always be returned.
+    *median = -1;
+    *std = -1;
+    return 0;
+  }
+
+  delay_values = num_delay_values >> 1; // Start value for median count down
+  // Get median of delay values since last update
+  for (i = 0; i < kHistorySizeBlocks; i++) {
+    delay_values -= self->aec->delay_histogram[i];
+    if (delay_values < 0) {
+      my_median = i;
+      break;
+    }
+  }
+  // Account for lookahead.
+  *median = (my_median - kLookaheadBlocks) * kMsPerBlock;
+
+  // Calculate the L1 norm, with median value as central moment
+  for (i = 0; i < kHistorySizeBlocks; i++) {
+    l1_norm += (float) (fabs(i - my_median) * self->aec->delay_histogram[i]);
+  }
+  // Round to the nearest block before converting to ms.
+  *std = (int) (l1_norm / (float) num_delay_values + 0.5f) * kMsPerBlock;
+
+  // Reset histogram
+  memset(self->aec->delay_histogram, 0, sizeof(self->aec->delay_histogram));
+
+  return 0;
+}
+
+// Copies the AEC version string, including its terminating NUL, into
+// |versionStr|.
+//
+// Inputs:  versionStr - destination buffer (must be non-NULL).
+//          len        - size of the destination buffer.
+// Returns 0 on success, -1 if versionStr is NULL or the buffer is too
+// small to hold the full, NUL-terminated string.
+WebRtc_Word32 WebRtcAec_get_version(WebRtc_Word8 *versionStr, WebRtc_Word16 len)
+{
+    const char version[] = "AEC 2.5.0";
+    const short versionLen = (short)strlen(version) + 1; // +1 for null-termination
+
+    if (versionStr == NULL) {
+        return -1;
+    }
+
+    if (versionLen > len) {
+        return -1;
+    }
+
+    // versionLen includes the NUL, so the copy is always terminated.
+    strncpy(versionStr, version, versionLen);
+    return 0;
+}
+
+// Returns the last error code recorded on the instance, or -1 when
+// |aecInst| itself is NULL.
+WebRtc_Word32 WebRtcAec_get_error_code(void *aecInst)
+{
+    aecpc_t *aecpc = aecInst;
+
+    if (aecpc == NULL) {
+        return -1;
+    }
+
+    return aecpc->lastError;
+}
+
+// Estimates the buffering delay between the far-end and near-end streams
+// and updates aecpc->knownDelay once the smoothed estimate has stayed far
+// enough from the current value for long enough. Always returns 0.
+static int EstBufDelay(aecpc_t* aecpc) {
+  // Sound-card buffer size converted from ms to samples (sampMsNb samples
+  // per ms, scaled by the rate multiplier).
+  int nSampSndCard = aecpc->msInSndCardBuf * sampMsNb * aecpc->aec->mult;
+  int current_delay = nSampSndCard - aecpc->aec->system_delay;
+  int delay_difference = 0;
+
+  // Before we proceed with the delay estimate filtering we:
+  // 1) Compensate for the frame that will be read.
+  // 2) Compensate for drift resampling.
+
+  // 1) Compensating for the frame(s) that will be read/processed.
+  current_delay += FRAME_LEN * aecpc->aec->mult;
+
+  // 2) Account for resampling frame delay.
+  if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
+    current_delay -= kResamplingDelay;
+  }
+
+  // First-order smoothing (0.8 old / 0.2 new), clamped at zero.
+  aecpc->filtDelay = WEBRTC_SPL_MAX(0, (short) (0.8 * aecpc->filtDelay +
+          0.2 * current_delay));
+
+  // Hysteresis: count consecutive frames where the filtered estimate sits
+  // clearly above (> 224 samples) or below (< 96 samples) the known delay;
+  // a flip between the two regions restarts the count.
+  delay_difference = aecpc->filtDelay - aecpc->knownDelay;
+  if (delay_difference > 224) {
+    if (aecpc->lastDelayDiff < 96) {
+      aecpc->timeForDelayChange = 0;
+    } else {
+      aecpc->timeForDelayChange++;
+    }
+  } else if (delay_difference < 96 && aecpc->knownDelay > 0) {
+    if (aecpc->lastDelayDiff > 224) {
+      aecpc->timeForDelayChange = 0;
+    } else {
+      aecpc->timeForDelayChange++;
+    }
+  } else {
+    aecpc->timeForDelayChange = 0;
+  }
+  aecpc->lastDelayDiff = delay_difference;
+
+  // Commit a new known delay only after more than 25 consistent frames,
+  // keeping a 160-sample headroom below the filtered estimate.
+  if (aecpc->timeForDelayChange > 25) {
+    aecpc->knownDelay = WEBRTC_SPL_MAX((int) aecpc->filtDelay - 160, 0);
+  }
+
+  return 0;
+}
diff --git a/src/modules/audio_processing/aec/main/interface/echo_cancellation.h b/src/modules/audio_processing/aec/interface/echo_cancellation.h
similarity index 92%
rename from src/modules/audio_processing/aec/main/interface/echo_cancellation.h
rename to src/modules/audio_processing/aec/interface/echo_cancellation.h
index 883357d..4da6e73 100644
--- a/src/modules/audio_processing/aec/main/interface/echo_cancellation.h
+++ b/src/modules/audio_processing/aec/interface/echo_cancellation.h
@@ -38,6 +38,7 @@
     WebRtc_Word16 nlpMode;        // default kAecNlpModerate
     WebRtc_Word16 skewMode;       // default kAecFalse
     WebRtc_Word16 metricsMode;    // default kAecFalse
+    int delay_logging;            // default kAecFalse
     //float realSkew;
 } AecConfig;
 
@@ -66,7 +67,7 @@
  * Inputs                       Description
  * -------------------------------------------------------------------
  * void **aecInst               Pointer to the AEC instance to be created
- *                              and initilized
+ *                              and initialized
  *
  * Outputs                      Description
  * -------------------------------------------------------------------
@@ -226,6 +227,23 @@
 WebRtc_Word32 WebRtcAec_GetMetrics(void *aecInst, AecMetrics *metrics);
 
 /*
+ * Gets the current delay metrics for the session.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*      handle            Pointer to the AEC instance
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int*       median            Delay median value.
+ * int*       std               Delay standard deviation.
+ *
+ * int        return             0: OK
+ *                              -1: error
+ */
+int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std);
+
+/*
  * Gets the last error code.
  *
  * Inputs                       Description
diff --git a/src/modules/audio_processing/aec/main/matlab/fullaec.m b/src/modules/audio_processing/aec/main/matlab/fullaec.m
deleted file mode 100644
index 0f86a8c..0000000
--- a/src/modules/audio_processing/aec/main/matlab/fullaec.m
+++ /dev/null
@@ -1,953 +0,0 @@
-% Partitioned block frequency domain adaptive filtering NLMS and 
-% standard time-domain sample-based NLMS 
-%fid=fopen('aecFar-samsung.pcm', 'rb'); % Load far end
-fid=fopen('aecFar.pcm', 'rb'); % Load far end
-%fid=fopen(farFile, 'rb'); % Load far end
-rrin=fread(fid,inf,'int16');
-fclose(fid); 
-%rrin=loadsl('data/far_me2.pcm'); % Load far end
-%fid=fopen('aecNear-samsung.pcm', 'rb'); % Load near end
-fid=fopen('aecNear.pcm', 'rb'); % Load near end
-%fid=fopen(nearFile, 'rb'); % Load near end
-ssin=fread(fid,inf,'int16');
-%ssin = [zeros(1024,1) ; ssin(1:end-1024)];
-
-fclose(fid);
-rand('state',13);
-fs=16000;
-mult=fs/8000;
-%rrin=rrin(fs*0+1:round(fs*120));
-%ssin=ssin(fs*0+1:round(fs*120));
-if fs == 8000
-	cohRange = 2:3;
-elseif fs==16000
-	cohRange = 2;
-end
-
-% Flags
-NLPon=1;  % NLP
-CNon=1; % Comfort noise
-PLTon=1;  % Plotting
-
-M = 16; % Number of partitions
-N = 64; % Partition length
-L = M*N; % Filter length 
-if fs == 8000
-    mufb = 0.6;
-else
-    mufb = 0.5;  
-end
-%mufb=1;  
-VADtd=48;
-alp = 0.1; % Power estimation factor alc = 0.1; % Coherence estimation factor
-beta = 0.9; % Plotting factor 
-%% Changed a little %%
-step = 0.3;%0.1875; % Downward step size 
-%%
-if fs == 8000
-    threshold=2e-6;  % DTrob threshold
-else
-    %threshold=0.7e-6;
-    threshold=1.5e-6; end
-
-if fs == 8000
-    echoBandRange = ceil(300*2/fs*N):floor(1800*2/fs*N);
-    %echoBandRange = ceil(1500*2/fs*N):floor(2500*2/fs*N);
-else
-    echoBandRange = ceil(300*2/fs*N):floor(1800*2/fs*N);
-    %echoBandRange = ceil(300*2/fs*N):floor(1800*2/fs*N);
-end
-%echoBandRange = ceil(1600*2/fs*N):floor(1900*2/fs*N);
-%echoBandRange = ceil(2000*2/fs*N):floor(4000*2/fs*N);
-suppState = 1;
-transCtr = 0;
-
-Nt=1;
-vt=1;
-
-ramp = 1.0003; % Upward ramp
-rampd = 0.999; % Downward ramp
-cvt = 20; % Subband VAD threshold;
-nnthres = 20; % Noise threshold 
-
-shh=logspace(-1.3,-2.2,N+1)';
-sh=[shh;flipud(shh(2:end-1))]; % Suppression profile
-
-len=length(ssin);
-w=zeros(L,1); % Sample-based TD NLMS 
-WFb=zeros(N+1,M); % Block-based FD NLMS
-WFbOld=zeros(N+1,M); % Block-based FD NLMS
-YFb=zeros(N+1,M);
-erfb=zeros(len,1);
-erfb3=zeros(len,1);
-
-ercn=zeros(len,1);
-zm=zeros(N,1);
-XFm=zeros(N+1,M);
-YFm=zeros(N+1,M);
-pn0=10*ones(N+1,1);
-pn=zeros(N+1,1);
-NN=len;
-Nb=floor(NN/N)-M;
-erifb=zeros(Nb+1,1)+0.1;
-erifb3=zeros(Nb+1,1)+0.1;
-ericn=zeros(Nb+1,1)+0.1;
-dri=zeros(Nb+1,1)+0.1;
-start=1;
-xo=zeros(N,1);
-do=xo;
-eo=xo;
-
-echoBands=zeros(Nb+1,1);
-cohxdAvg=zeros(Nb+1,1);
-cohxdSlow=zeros(Nb+1,N+1);
-cohedSlow=zeros(Nb+1,N+1);
-%overdriveM=zeros(Nb+1,N+1);
-cohxdFastAvg=zeros(Nb+1,1);
-cohxdAvgBad=zeros(Nb+1,1);
-cohedAvg=zeros(Nb+1,1);
-cohedFastAvg=zeros(Nb+1,1);
-hnledAvg=zeros(Nb+1,1);
-hnlxdAvg=zeros(Nb+1,1);
-ovrdV=zeros(Nb+1,1);
-dIdxV=zeros(Nb+1,1);
-SLxV=zeros(Nb+1,1);
-hnlSortQV=zeros(Nb+1,1);
-hnlPrefAvgV=zeros(Nb+1,1);
-mutInfAvg=zeros(Nb+1,1);
-%overdrive=zeros(Nb+1,1);
-hnled = zeros(N+1, 1);
-weight=zeros(N+1,1);
-hnlMax = zeros(N+1, 1);
-hnl = zeros(N+1, 1);
-overdrive = ones(1, N+1);
-xfwm=zeros(N+1,M);
-dfm=zeros(N+1,M);
-WFbD=ones(N+1,1);
-
-fbSupp = 0;
-hnlLocalMin = 1;
-cohxdLocalMin = 1;
-hnlLocalMinV=zeros(Nb+1,1);
-cohxdLocalMinV=zeros(Nb+1,1);
-hnlMinV=zeros(Nb+1,1);
-dkEnV=zeros(Nb+1,1);
-ekEnV=zeros(Nb+1,1);
-ovrd = 2;
-ovrdPos = floor((N+1)/4);
-ovrdSm = 2;
-hnlMin = 1;
-minCtr = 0;
-SeMin = 0;
-SdMin = 0;
-SeLocalAvg = 0;
-SeMinSm = 0;
-divergeFact = 1;
-dIdx = 1;
-hnlMinCtr = 0;
-hnlNewMin = 0;
-divergeState = 0;
-
-Sy=ones(N+1,1);
-Sym=1e7*ones(N+1,1);
-
-wins=[0;sqrt(hanning(2*N-1))];
-ubufn=zeros(2*N,1);
-ebuf=zeros(2*N,1);
-ebuf2=zeros(2*N,1);
-ebuf4=zeros(2*N,1);
-mbuf=zeros(2*N,1);
-
-cohedFast = zeros(N+1,1);
-cohxdFast = zeros(N+1,1);
-cohxd = zeros(N+1,1);
-Se = zeros(N+1,1);
-Sd = zeros(N+1,1);
-Sx = zeros(N+1,1);
-SxBad = zeros(N+1,1);
-Sed = zeros(N+1,1);
-Sxd = zeros(N+1,1);
-SxdBad = zeros(N+1,1);
-hnledp=[];
-
-cohxdMax = 0;
-
-%hh=waitbar(0,'Please wait...');
-progressbar(0);
-
-%spaces = ' ';
-%spaces = repmat(spaces, 50, 1);
-%spaces = ['[' ; spaces ; ']'];
-%fprintf(1, spaces);
-%fprintf(1, '\n');
-
-for kk=1:Nb
-    pos = N * (kk-1) + start;
-    
-    % FD block method
-    % ----------------------   Organize data
-    xk = rrin(pos:pos+N-1);
-    dk = ssin(pos:pos+N-1);
-    
-    xx = [xo;xk];
-    xo = xk;
-    tmp = fft(xx); 
-	XX = tmp(1:N+1);
-
-	dd = [do;dk];  % Overlap
-	do = dk;
-	tmp = fft(dd); % Frequency domain 
-	DD = tmp(1:N+1);
-    
-    % ------------------------  Power estimation
-    pn0 = (1 - alp) * pn0 + alp * real(XX.* conj(XX));
-    pn = pn0;
-    %pn = (1 - alp) * pn + alp * M * pn0;
-	if (CNon)
-		Yp =  real(conj(DD).*DD); % Instantaneous power      
-		Sy =  (1 - alp) * Sy + alp * Yp; % Averaged power    
-		
-		mm = min(Sy,Sym);  
-		diff = Sym - mm;
-		if (kk>50)
-			Sym = (mm + step*diff) * ramp; % Estimated background noise power   
-		end
-	end
-    
-    % ----------------------   Filtering   
-    XFm(:,1) = XX;
-    for mm=0:(M-1)
-        m=mm+1;  
-        YFb(:,m) = XFm(:,m) .* WFb(:,m);
-    end
-    yfk = sum(YFb,2);
-	tmp = [yfk ; flipud(conj(yfk(2:N)))];
-    ykt = real(ifft(tmp));
-    ykfb = ykt(end-N+1:end); 
-    
-    % ----------------------   Error estimation 
-    ekfb = dk - ykfb; 
-    %if sum(abs(ekfb)) < sum(abs(dk))
-        %ekfb = dk - ykfb; 
-    %    erfb(pos:pos+N-1) = ekfb; 
-    %else
-        %ekfb = dk;
-    %    erfb(pos:pos+N-1) = dk; 
-    %end
-	%(kk-1)*(N*2)+1
-    erfb(pos:pos+N-1) = ekfb; 
-    tmp = fft([zm;ekfb]);      % FD version for cancelling part (overlap-save)
-	Ek = tmp(1:N+1);
-
-    % ------------------------  Adaptation  
-	Ek2 = Ek ./(M*pn + 0.001); % Normalized error
-	%Ek2 = Ek ./(pn + 0.001); % Normalized error
-	%Ek2 = Ek ./(100*pn + 0.001); % Normalized error
-	
-	absEf = max(abs(Ek2), threshold);
-	absEf = ones(N+1,1)*threshold./absEf;
-	Ek2 = Ek2.*absEf;
-	
-	mEk = mufb.*Ek2;
-	PP = conj(XFm).*(ones(M,1) * mEk')'; 
-	tmp = [PP ; flipud(conj(PP(2:N,:)))];
-	IFPP = real(ifft(tmp));
-	PH = IFPP(1:N,:);
-	tmp = fft([PH;zeros(N,M)]);
-	FPH = tmp(1:N+1,:);
-	WFb = WFb + FPH;
-
-    if mod(kk, 10*mult) == 0
-        WFbEn = sum(real(WFb.*conj(WFb)));
-        %WFbEn = sum(abs(WFb));
-        [tmp, dIdx] = max(WFbEn);
-
-        WFbD = sum(abs(WFb(:, dIdx)),2);
-        %WFbD = WFbD / (mean(WFbD) + 1e-10); 
-        WFbD = min(max(WFbD, 0.5), 4);
-    end
-    dIdxV(kk) = dIdx;
-    
-	% NLP
-	if (NLPon)
-        
-        ee = [eo;ekfb]; 
-        eo = ekfb;
-		window = wins;
-        if fs == 8000
-            %gamma = 0.88;
-            gamma = 0.9;
-        else
-		    %gamma = 0.92;
-		    gamma = 0.93;
-        end
-		%gamma = 0.9;
-
-		tmp = fft(xx.*window);
-		xf = tmp(1:N+1);
-		tmp = fft(dd.*window);
-		df = tmp(1:N+1);
-		tmp = fft(ee.*window);
-		ef = tmp(1:N+1);
-
-        xfwm(:,1) = xf;
-        xf = xfwm(:,dIdx);
-        %fprintf(1,'%d: %f\n', kk, xf(4));
-        dfm(:,1) = df;
-        
-        SxOld = Sx;
-
-		Se = gamma*Se + (1-gamma)*real(ef.*conj(ef));
-		Sd = gamma*Sd + (1-gamma)*real(df.*conj(df));
-		Sx = gamma*Sx + (1 - gamma)*real(xf.*conj(xf));
-
-        %xRatio = real(xfwm(:,1).*conj(xfwm(:,1))) ./ ...
-        %    (real(xfwm(:,2).*conj(xfwm(:,2))) + 1e-10);
-        %xRatio = Sx ./ (SxOld + 1e-10);
-        %SLx = log(1/(N+1)*sum(xRatio)) - 1/(N+1)*sum(log(xRatio));
-        %SLxV(kk) = SLx;
-
-        %freqSm = 0.9;
-        %Sx = filter(freqSm, [1 -(1-freqSm)], Sx);
-        %Sx(end:1) = filter(freqSm, [1 -(1-freqSm)], Sx(end:1));
-        %Se = filter(freqSm, [1 -(1-freqSm)], Se);
-        %Se(end:1) = filter(freqSm, [1 -(1-freqSm)], Se(end:1));
-        %Sd = filter(freqSm, [1 -(1-freqSm)], Sd);
-        %Sd(end:1) = filter(freqSm, [1 -(1-freqSm)], Sd(end:1));
-
-		%SeFast = ef.*conj(ef);
-		%SdFast = df.*conj(df);
-        %SxFast = xf.*conj(xf);
-        %cohedFast = 0.9*cohedFast + 0.1*SeFast ./ (SdFast + 1e-10);
-        %cohedFast(find(cohedFast > 1)) = 1;
-        %cohedFast(find(cohedFast > 1)) = 1 ./ cohedFast(find(cohedFast>1));
-        %cohedFastAvg(kk) = mean(cohedFast(echoBandRange));
-        %cohedFastAvg(kk) = min(cohedFast);
-
-        %cohxdFast = 0.8*cohxdFast + 0.2*log(SdFast ./ (SxFast + 1e-10));
-        %cohxdFastAvg(kk) = mean(cohxdFast(echoBandRange));
-
-		% coherence
-        Sxd = gamma*Sxd + (1 - gamma)*xf.*conj(df);
-		Sed = gamma*Sed + (1-gamma)*ef.*conj(df);
-
-        %Sxd = filter(freqSm, [1 -(1-freqSm)], Sxd);
-        %Sxd(end:1) = filter(freqSm, [1 -(1-freqSm)], Sxd(end:1));
-        %Sed = filter(freqSm, [1 -(1-freqSm)], Sed);
-        %Sed(end:1) = filter(freqSm, [1 -(1-freqSm)], Sed(end:1));
-
-		cohed = real(Sed.*conj(Sed))./(Se.*Sd + 1e-10);
-        %cohedAvg(kk) = mean(cohed(echoBandRange));
-        %cohedAvg(kk) = cohed(6);
-        %cohedAvg(kk) = min(cohed);
-
-		cohxd = real(Sxd.*conj(Sxd))./(Sx.*Sd + 1e-10);
-        %freqSm = 0.5;
-        %cohxd(3:end) = filter(freqSm, [1 -(1-freqSm)], cohxd(3:end));
-        %cohxd(end:3) = filter(freqSm, [1 -(1-freqSm)], cohxd(end:3));
-        %cohxdAvg(kk) = mean(cohxd(echoBandRange));
-        %cohxdAvg(kk) = (cohxd(32));
-        %cohxdAvg(kk) = max(cohxd);
-
-        %xf = xfm(:,dIdx);
-		%SxBad = gamma*SxBad + (1 - gamma)*real(xf.*conj(xf));
-        %SxdBad = gamma*SxdBad + (1 - gamma)*xf.*conj(df);
-		%cohxdBad = real(SxdBad.*conj(SxdBad))./(SxBad.*Sd + 0.01);
-        %cohxdAvgBad(kk) = mean(cohxdBad);
-
-        %for j=1:N+1
-        %    mutInf(j) = 0.9*mutInf(j) + 0.1*information(abs(xfm(j,:)), abs(dfm(j,:)));
-        %end
-        %mutInfAvg(kk) = mean(mutInf);
-
-        %hnled = cohedFast;
-        %xIdx = find(cohxd > 1 - cohed);
-        %hnled(xIdx) = 1 - cohxd(xIdx);
-        %hnled = 1 - max(cohxd, 1-cohedFast);
-        hnled = min(1 - cohxd, cohed);
-        %hnled = 1 - cohxd;
-        %hnled = max(1 - (cohxd + (1-cohedFast)), 0);
-        %hnled = 1 - max(cohxd, 1-cohed);
-
-        if kk > 1
-            cohxdSlow(kk,:) = 0.99*cohxdSlow(kk-1,:) + 0.01*cohxd';
-            cohedSlow(kk,:) = 0.99*cohedSlow(kk-1,:) + 0.01*(1-cohed)';
-        end
-
-
-        if 0
-        %if kk > 50
-            %idx = find(hnled > 0.3);
-            hnlMax = hnlMax*0.9999;
-            %hnlMax(idx) = max(hnlMax(idx), hnled(idx));
-            hnlMax = max(hnlMax, hnled);
-            %overdrive(idx) = max(log(hnlMax(idx))/log(0.99), 1); 
-            avgHnl = mean(hnlMax(echoBandRange));
-            if avgHnl > 0.3
-                overdrive = max(log(avgHnl)/log(0.99), 1);
-            end
-            weight(4:end) = max(hnlMax) - hnlMax(4:end);
-        end
-        
-        
-
-        %[hg, gidx] = max(hnled); 
-        %fnrg = Sx(gidx) / (Sd(gidx) + 1e-10);
-        
-        %[tmp, bidx] = find((Sx / Sd + 1e-10) > fnrg);
-        %hnled(bidx) = hg;
-
-
-		%cohed1 = mean(cohed(cohRange)); % range depends on bandwidth 
-		%cohed1 = cohed1^2;
-        %echoBands(kk) = length(find(cohed(echoBandRange) < 0.25))/length(echoBandRange);
-
-        %if (fbSupp == 0)
-        %    if (echoBands(kk) > 0.8)
-        %        fbSupp = 1;
-        %    end
-        %else
-        %    if (echoBands(kk) < 0.6)
-        %        fbSupp = 0;
-        %    end
-        %end
-        %overdrive(kk) = 7.5*echoBands(kk) + 0.5; 
-        
-		% Factor by which to weight other bands
-		%if (cohed1 < 0.1)
-		%	w = 0.8 - cohed1*10*0.4;
-		%else
-		%	w = 0.4;
-		%end
-			
-		% Weight coherence subbands
-		%hnled = w*cohed1 + (1 - w)*cohed;
-		%hnled = (hnled).^2;
-		%cohed(floor(N/2):end) = cohed(floor(N/2):end).^2;
-        %if fbSupp == 1
-        %    cohed = zeros(size(cohed));
-        %end
-        %cohed = cohed.^overdrive(kk);
-
-        %hnled = gamma*hnled + (1 - gamma)*cohed;
-		% Additional hf suppression
-		%hnledp = [hnledp ; mean(hnled)];
-		%hnled(floor(N/2):end) = hnled(floor(N/2):end).^2;
-		%ef = ef.*((weight*(min(1 - hnled)).^2 + (1 - weight).*(1 - hnled)).^2);
-
-        cohedMean = mean(cohed(echoBandRange));
-        %aggrFact = 4*(1-mean(hnled(echoBandRange))) + 1;
-        %[hnlSort, hnlSortIdx] = sort(hnled(echoBandRange));
-        [hnlSort, hnlSortIdx] = sort(1-cohxd(echoBandRange));
-        [xSort, xSortIdx] = sort(Sx);
-        %aggrFact = (1-mean(hnled(echoBandRange)));
-        %hnlSortQ = hnlSort(qIdx);
-        hnlSortQ = mean(1 - cohxd(echoBandRange));
-        %hnlSortQ = mean(1 - cohxd);
-
-        [hnlSort2, hnlSortIdx2] = sort(hnled(echoBandRange));
-        %[hnlSort2, hnlSortIdx2] = sort(hnled);
-        hnlQuant = 0.75;
-        hnlQuantLow = 0.5;
-        qIdx = floor(hnlQuant*length(hnlSort2));
-        qIdxLow = floor(hnlQuantLow*length(hnlSort2));
-        hnlPrefAvg = hnlSort2(qIdx);
-        hnlPrefAvgLow = hnlSort2(qIdxLow);
-        %hnlPrefAvgLow = mean(hnled);
-        %hnlPrefAvg = max(hnlSort2);
-        %hnlPrefAvgLow = min(hnlSort2);
-
-        %hnlPref = hnled(echoBandRange);
-        %hnlPrefAvg = mean(hnlPref(xSortIdx((0.5*length(xSortIdx)):end)));
-
-        %hnlPrefAvg = min(hnlPrefAvg, hnlSortQ);
-
-        %hnlSortQIdx = hnlSortIdx(qIdx);
-        %SeQ = Se(qIdx + echoBandRange(1) - 1); 
-        %SdQ = Sd(qIdx + echoBandRange(1) - 1); 
-        %SeQ = Se(qIdxLow + echoBandRange(1) - 1); 
-        %SdQ = Sd(qIdxLow + echoBandRange(1) - 1); 
-        %propLow = length(find(hnlSort < 0.1))/length(hnlSort);
-        %aggrFact = min((1 - hnlSortQ)/2, 0.5);
-        %aggrTerm = 1/aggrFact;
-
-        %hnlg = mean(hnled(echoBandRange));
-        %hnlg = hnlSortQ;
-        %if suppState == 0
-        %    if hnlg < 0.05
-        %        suppState = 2;
-        %        transCtr = 0;
-        %    elseif hnlg < 0.75
-        %        suppState = 1;
-        %        transCtr = 0;
-        %    end
-        %elseif suppState == 1
-        %    if hnlg > 0.8
-        %        suppState = 0;
-        %        transCtr = 0;
-        %    elseif hnlg < 0.05
-        %        suppState = 2;
-        %        transCtr = 0;
-        %    end
-        %else
-        %    if hnlg > 0.8
-        %        suppState = 0;
-        %        transCtr = 0;
-        %    elseif hnlg > 0.25
-        %        suppState = 1;
-        %        transCtr = 0;
-        %    end
-        %end
-        %if kk > 50
-
-            if cohedMean > 0.98 & hnlSortQ > 0.9
-                %if suppState == 1
-                %    hnled = 0.5*hnled + 0.5*cohed;
-                %    %hnlSortQ = 0.5*hnlSortQ + 0.5*cohedMean;
-                %    hnlPrefAvg = 0.5*hnlPrefAvg + 0.5*cohedMean;
-                %else
-                %    hnled = cohed;
-                %    %hnlSortQ = cohedMean;
-                %    hnlPrefAvg = cohedMean;
-                %end
-                suppState = 0;
-            elseif cohedMean < 0.95 | hnlSortQ < 0.8
-                %if suppState == 0
-                %    hnled = 0.5*hnled + 0.5*cohed;
-                %    %hnlSortQ = 0.5*hnlSortQ + 0.5*cohedMean;
-                %    hnlPrefAvg = 0.5*hnlPrefAvg + 0.5*cohedMean;
-                %end
-                suppState = 1;
-            end
-
-            if hnlSortQ < cohxdLocalMin & hnlSortQ < 0.75
-                cohxdLocalMin = hnlSortQ;
-            end
-
-            if cohxdLocalMin == 1
-                ovrd = 3;
-                hnled = 1-cohxd;
-                hnlPrefAvg = hnlSortQ;
-                hnlPrefAvgLow = hnlSortQ;
-            end
-
-            if suppState == 0
-                hnled = cohed;
-                hnlPrefAvg = cohedMean;
-                hnlPrefAvgLow = cohedMean;
-            end
-
-            %if hnlPrefAvg < hnlLocalMin & hnlPrefAvg < 0.6
-            if hnlPrefAvgLow < hnlLocalMin & hnlPrefAvgLow < 0.6
-                %hnlLocalMin = hnlPrefAvg;
-                %hnlMin = hnlPrefAvg;
-                hnlLocalMin = hnlPrefAvgLow;
-                hnlMin = hnlPrefAvgLow;
-                hnlNewMin = 1;
-                hnlMinCtr = 0;
-                %if hnlMinCtr == 0
-                %    hnlMinCtr = hnlMinCtr + 1;
-                %else
-                %    hnlMinCtr = 0;
-                %    hnlMin = hnlLocalMin;
-                    %SeLocalMin = SeQ;
-                    %SdLocalMin = SdQ;
-                    %SeLocalAvg = 0;
-                    %minCtr = 0;
-                %    ovrd = max(log(0.0001)/log(hnlMin), 2);
-                    %divergeFact = hnlLocalMin;
-            end
-
-            if hnlNewMin == 1
-                hnlMinCtr = hnlMinCtr + 1;
-            end
-            if hnlMinCtr == 2 
-                hnlNewMin = 0;
-                hnlMinCtr = 0;
-                %ovrd = max(log(0.0001)/log(hnlMin), 2);
-                ovrd = max(log(0.00001)/(log(hnlMin + 1e-10) + 1e-10), 3);
-                %ovrd = max(log(0.00000001)/(log(hnlMin + 1e-10) + 1e-10), 5);
-                %ovrd = max(log(0.0001)/log(hnlPrefAvg), 2);
-                %ovrd = max(log(0.001)/log(hnlMin), 2);
-            end
-            hnlLocalMin = min(hnlLocalMin + 0.0008/mult, 1);
-            cohxdLocalMin = min(cohxdLocalMin + 0.0004/mult, 1);
-            %divergeFact = hnlSortQ;
-
-
-            %if minCtr > 0 & hnlLocalMin < 1
-            %    hnlMin = hnlLocalMin;
-            %    %SeMin = 0.9*SeMin + 0.1*sqrt(SeLocalMin);
-            %    SdMin = sqrt(SdLocalMin);
-            %    %SeMin = sqrt(SeLocalMin)*hnlSortQ;
-            %    SeMin = sqrt(SeLocalMin);
-            %    %ovrd = log(100/SeMin)/log(hnlSortQ);
-            %    %ovrd = log(100/SeMin)/log(hnlSortQ);
-            %    ovrd = log(0.01)/log(hnlMin);
-            %    ovrd = max(ovrd, 2);
-            %    ovrdPos = hnlSortQIdx;
-            %    %ovrd = max(ovrd, 1);
-            %    %SeMin = sqrt(SeLocalAvg/5);
-            %    minCtr = 0;
-            %else
-            %    %SeLocalMin = 0.9*SeLocalMin +0.1*SeQ;
-            %    SeLocalAvg = SeLocalAvg + SeQ;
-            %    minCtr = minCtr + 1;
-            %end
-
-            if ovrd < ovrdSm
-                ovrdSm = 0.99*ovrdSm + 0.01*ovrd;
-            else
-                ovrdSm = 0.9*ovrdSm + 0.1*ovrd;
-            end
-        %end
-
-        %ekEn = sum(real(ekfb.^2));
-        %dkEn = sum(real(dk.^2));
-        ekEn = sum(Se);
-        dkEn = sum(Sd);
-
-        if divergeState == 0
-            if ekEn > dkEn
-                ef = df;
-                divergeState = 1;
-                %hnlPrefAvg = hnlSortQ;
-                %hnled = (1 - cohxd);
-            end
-        else
-            %if ekEn*1.1 < dkEn  
-            %if ekEn*1.26 < dkEn  
-            if ekEn*1.05 < dkEn  
-                divergeState = 0;
-            else
-                ef = df;
-            end
-        end
-
-        if ekEn > dkEn*19.95
-            WFb=zeros(N+1,M); % Block-based FD NLMS
-        end
-
-        ekEnV(kk) = ekEn;
-        dkEnV(kk) = dkEn;
-
-        hnlLocalMinV(kk) = hnlLocalMin;
-        cohxdLocalMinV(kk) = cohxdLocalMin;
-        hnlMinV(kk) = hnlMin;
-        %cohxdMaxLocal = max(cohxdSlow(kk,:));
-        %if kk > 50
-        %cohxdMaxLocal = 1-hnlSortQ;
-        %if cohxdMaxLocal > 0.5
-        %    %if cohxdMaxLocal > cohxdMax 
-        %        odScale = max(log(cohxdMaxLocal)/log(0.95), 1);
-        %        %overdrive(7:end) = max(log(cohxdSlow(kk,7:end))/log(0.9), 1);
-        %        cohxdMax = cohxdMaxLocal;
-        %    end
-        %end
-        %end
-        %cohxdMax = cohxdMax*0.999;
-
-        %overdriveM(kk,:) = max(overdrive, 1);
-        %aggrFact = 0.25;
-        aggrFact = 0.3;
-        %aggrFact = 0.5*propLow;
-        %if fs == 8000
-        %    wCurve = [0 ; 0 ; aggrFact*sqrt(linspace(0,1,N-1))' + 0.1];
-        %else
-        %    wCurve = [0; 0; 0; aggrFact*sqrt(linspace(0,1,N-2))' + 0.1];
-        %end
-        wCurve = [0; aggrFact*sqrt(linspace(0,1,N))' + 0.1];
-        % For sync with C
-        %if fs == 8000
-        %    wCurve = wCurve(2:end);
-        %else
-        %    wCurve = wCurve(1:end-1);
-        %end
-        %weight = aggrFact*(sqrt(linspace(0,1,N+1)'));
-        %weight = aggrFact*wCurve;
-        weight = wCurve;
-        %weight = aggrFact*ones(N+1,1);
-        %weight = zeros(N+1,1);
-        %hnled = weight.*min(hnled) + (1 - weight).*hnled;
-        %hnled = weight.*min(mean(hnled(echoBandRange)), hnled) + (1 - weight).*hnled;
-        %hnled = weight.*min(hnlSortQ, hnled) + (1 - weight).*hnled;
-
-        %hnlSortQV(kk) = mean(hnled);
-        %hnlPrefAvgV(kk) = mean(hnled(echoBandRange));
-
-        hnled = weight.*min(hnlPrefAvg, hnled) + (1 - weight).*hnled;
-
-        %od = aggrFact*(sqrt(linspace(0,1,N+1)') + aggrTerm);
-        %od = 4*(sqrt(linspace(0,1,N+1)') + 1/4);
-
-        %ovrdFact = (ovrdSm - 1) / sqrt(ovrdPos/(N+1));
-        %ovrdFact = ovrdSm / sqrt(echoBandRange(floor(length(echoBandRange)/2))/(N+1));
-        %od = ovrdFact*sqrt(linspace(0,1,N+1))' + 1;
-        %od = ovrdSm*ones(N+1,1).*abs(WFb(:,dIdx))/(max(abs(WFb(:,dIdx)))+1e-10);
-
-        %od = ovrdSm*ones(N+1,1);
-        %od = ovrdSm*WFbD.*(sqrt(linspace(0,1,N+1))' + 1);
-
-        od = ovrdSm*(sqrt(linspace(0,1,N+1))' + 1);
-        %od = 4*(sqrt(linspace(0,1,N+1))' + 1);
-
-        %od = 2*ones(N+1,1);
-        %od = 2*ones(N+1,1);
-        %sshift = ((1-hnled)*2-1).^3+1; 
-        sshift = ones(N+1,1);
-
-        hnled = hnled.^(od.*sshift);
-
-        %if hnlg > 0.75
-            %if (suppState ~= 0)
-            %    transCtr = 0;
-            %end
-        %    suppState = 0;
-        %elseif hnlg < 0.6 & hnlg > 0.2
-        %    suppState = 1;
-        %elseif hnlg < 0.1
-            %hnled = zeros(N+1, 1);
-            %if (suppState ~= 2)
-            %    transCtr = 0;
-            %end
-        %    suppState = 2;
-        %else
-        %    if (suppState ~= 2)
-        %        transCtr = 0;
-        %    end
-        %    suppState = 2;
-        %end
-        %if suppState == 0
-        %    hnled = ones(N+1, 1);
-        %elseif suppState == 2
-        %    hnled = zeros(N+1, 1);
-        %end
-        %hnled(find(hnled < 0.1)) = 0;
-        %hnled = hnled.^2;
-        %if transCtr < 5
-            %hnl = 0.75*hnl + 0.25*hnled;
-        %    transCtr = transCtr + 1;
-        %else
-            hnl = hnled;
-        %end
-        %hnled(find(hnled < 0.05)) = 0;
-        ef = ef.*(hnl);
-
-        %ef = ef.*(min(1 - cohxd, cohed).^2);
-        %ef = ef.*((1-cohxd).^2);
-        
-        ovrdV(kk) = ovrdSm;
-        %ovrdV(kk) = dIdx;
-        %ovrdV(kk) = divergeFact;
-        %hnledAvg(kk) = 1-mean(1-cohedFast(echoBandRange));
-        hnledAvg(kk) = 1-mean(1-cohed(echoBandRange));
-        hnlxdAvg(kk) = 1-mean(cohxd(echoBandRange));
-        %hnlxdAvg(kk) = cohxd(5);
-        %hnlSortQV(kk) = mean(hnled);
-        hnlSortQV(kk) = hnlPrefAvgLow;
-        hnlPrefAvgV(kk) = hnlPrefAvg;
-        %hnlAvg(kk) = propLow;
-        %ef(N/2:end) = 0;
-        %ner = (sum(Sd) ./ (sum(Se.*(hnl.^2)) + 1e-10));
-
-		% Comfort noise
-		if (CNon)
-			snn=sqrt(Sym);
-			snn(1)=0; % Reject LF noise
-			Un=snn.*exp(j*2*pi.*[0;rand(N-1,1);0]);
-
-			% Weight comfort noise by suppression
-			Un = sqrt(1-hnled.^2).*Un;
-			Fmix = ef + Un;
-		else
-			Fmix = ef;
-		end
-
-		% Overlap and add in time domain for smoothness 
-		tmp = [Fmix ; flipud(conj(Fmix(2:N)))];
-		mixw = wins.*real(ifft(tmp));
-		mola  = mbuf(end-N+1:end) + mixw(1:N);
-		mbuf = mixw;
-		ercn(pos:pos+N-1) = mola; 
-	end % NLPon
-
-    % Filter update
-	%Ek2 = Ek ./(12*pn + 0.001); % Normalized error
-    %Ek2 = Ek2 * divergeFact;
-	%Ek2 = Ek ./(pn + 0.001); % Normalized error
-	%Ek2 = Ek ./(100*pn + 0.001); % Normalized error
-
-    %divergeIdx = find(abs(Ek) > abs(DD));
-    %divergeIdx = find(Se > Sd);
-    %threshMod = threshold*ones(N+1,1); 
-    %if length(divergeIdx) > 0
-    %if sum(abs(Ek)) > sum(abs(DD))
-        %WFb(divergeIdx,:) = WFb(divergeIdx,:) .* repmat(sqrt(Sd(divergeIdx)./(Se(divergeIdx)+1e-10))),1,M);
-        %Ek2(divergeIdx) = Ek2(divergeIdx) .* sqrt(Sd(divergeIdx)./(Se(divergeIdx)+1e-10));
-        %Ek2(divergeIdx) = Ek2(divergeIdx) .* abs(DD(divergeIdx))./(abs(Ek(divergeIdx))+1e-10);
-        %WFb(divergeIdx,:) = WFbOld(divergeIdx,:);
-        %WFb = WFbOld;
-        %threshMod(divergeIdx) = threshMod(divergeIdx) .* abs(DD(divergeIdx))./(abs(Ek(divergeIdx))+1e-10);
-    %    threshMod(divergeIdx) = threshMod(divergeIdx) .* sqrt(Sd(divergeIdx)./(Se(divergeIdx)+1e-10));
-    %end
-	
-	%absEf = max(abs(Ek2), threshold);
-	%absEf = ones(N+1,1)*threshold./absEf;
-	%absEf = max(abs(Ek2), threshMod);
-	%absEf = threshMod./absEf;
-	%Ek2 = Ek2.*absEf;
-
-    %if sum(Se) <= sum(Sd)
-
-    %    mEk = mufb.*Ek2;
-    %    PP = conj(XFm).*(ones(M,1) * mEk')'; 
-    %    tmp = [PP ; flipud(conj(PP(2:N,:)))];
-    %    IFPP = real(ifft(tmp));
-    %    PH = IFPP(1:N,:);
-    %    tmp = fft([PH;zeros(N,M)]);
-    %    FPH = tmp(1:N+1,:);
-    %    %WFbOld = WFb;
-    %    WFb = WFb + FPH;
-
-    %else
-    %    WF = WFbOld;
-    %end
-
-	% Shift old FFTs
-	%for m=M:-1:2
-	%	XFm(:,m) = XFm(:,m-1);    
-	%	YFm(:,m) = YFm(:,m-1);    
-	%end
-    XFm(:,2:end) = XFm(:,1:end-1);
-    YFm(:,2:end) = YFm(:,1:end-1);
-    xfwm(:,2:end) = xfwm(:,1:end-1);
-    dfm(:,2:end) = dfm(:,1:end-1);
-
-	%if mod(kk, floor(Nb/50)) == 0
-    %    fprintf(1, '.');
-	%end
-
-	if mod(kk, floor(Nb/100)) == 0
-	%if mod(kk, floor(Nb/500)) == 0
-        progressbar(kk/Nb); 
-        %figure(5)
-        %plot(abs(WFb));
-        %legend('1','2','3','4','5','6','7','8','9','10','11','12');
-        %title(kk*N/fs);
-        %figure(6)
-        %plot(WFbD);
-        %figure(6)
-        %plot(threshMod)
-        %if length(divergeIdx) > 0
-        %    plot(abs(DD))
-        %    hold on
-        %    plot(abs(Ek), 'r')
-        %    hold off
-            %plot(min(sqrt(Sd./(Se+1e-10)),1))
-            %axis([0 N 0 1]);
-        %end
-        %figure(6)
-        %plot(cohedFast);
-        %axis([1 N+1 0 1]);
-        %plot(WFbEn);
-
-        %figure(7)
-        %plot(weight);
-        %plot([cohxd 1-cohed]);
-        %plot([cohxd 1-cohed 1-cohedFast hnled]);
-        %plot([cohxd cohxdFast/max(cohxdFast)]);
-        %legend('cohxd', '1-cohed', '1-cohedFast');
-        %axis([1 65 0 1]);
-        %pause(0.5);
-        %overdrive
-    end
-end
-progressbar(1);
-
-%figure(2);
-%plot([feat(:,1) feat(:,2)+1 feat(:,3)+2 mfeat+3]);
-%plot([feat(:,1) mfeat+1]);
-
-%figure(3);
-%plot(10*log10([dri erifb erifb3 ericn]));
-%legend('Near-end','Error','Post NLP','Final',4);
-% Compensate for delay
-%ercn=[ercn(N+1:end);zeros(N,1)];
-%ercn_=[ercn_(N+1:end);zeros(N,1)];
-
-%figure(11);
-%plot(cohxdSlow);
-
-%figure(12);
-%surf(cohxdSlow);
-%shading interp;
-
-%figure(13);
-%plot(overdriveM);
-
-%figure(14);
-%surf(overdriveM);
-%shading interp;
-
-figure(10);
-t = (0:Nb)*N/fs;
-rrinSubSamp = rrin(N*(1:(Nb+1)));
-plot(t, rrinSubSamp/max(abs(rrinSubSamp)),'b');
-hold on
-plot(t, hnledAvg, 'r');
-plot(t, hnlxdAvg, 'g');
-plot(t, hnlSortQV, 'y');
-plot(t, hnlLocalMinV, 'k');
-plot(t, cohxdLocalMinV, 'c');
-plot(t, hnlPrefAvgV, 'm');
-%plot(t, cohxdAvg, 'r');
-%plot(cohxdFastAvg, 'r');
-%plot(cohxdAvgBad, 'k');
-%plot(t, cohedAvg, 'k');
-%plot(t, 1-cohedFastAvg, 'k');
-%plot(ssin(N*(1:floor(length(ssin)/N)))/max(abs(ssin)));
-%plot(echoBands,'r');
-%plot(overdrive, 'g');
-%plot(erfb(N*(1:floor(length(erfb)/N)))/max(abs(erfb)));
-hold off
-tightx;
-
-figure(11)
-plot(t, ovrdV);
-tightx;
-%plot(mfeat,'r');
-%plot(1-cohxyp_,'r');
-%plot(Hnlxydp,'y');
-%plot(hnledp,'k');
-%plot(Hnlxydp, 'c');
-%plot(ccohpd_,'k');
-%plot(supplot_, 'g');
-%plot(ones(length(mfeat),1)*rr1_, 'k');
-%plot(ones(length(mfeat),1)*rr2_, 'k');
-%plot(N*(1:length(feat)), feat);
-%plot(Sep_,'r');
-%axis([1 floor(length(erfb)/N) -1 1])
-%hold off
-%plot(10*log10([Se_, Sx_, Seu_, real(sf_.*conj(sf_))]));
-%legend('Se','Sx','Seu','S');
-%figure(5)
-%plot([ercn ercn_]);
-
-figure(12)
-plot(t, dIdxV);
-%plot(t, SLxV);
-tightx;
-
-%figure(13)
-%plot(t, [ekEnV dkEnV]);
-%plot(t, dkEnV./(ekEnV+1e-10));
-%tightx;
-
-%close(hh);
-%spclab(fs,ssin,erfb,ercn,'outxd.pcm');
-%spclab(fs,rrin,ssin,erfb,1.78*ercn,'vqeOut-1.pcm');
-%spclab(fs,erfb,'aecOutLp.pcm');
-%spclab(fs,rrin,ssin,erfb,1.78*ercn,'aecOut25.pcm','vqeOut-1.pcm');
-%spclab(fs,rrin,ssin,erfb,ercn,'aecOut-mba.pcm');
-%spclab(fs,rrin,ssin,erfb,ercn,'aecOut.pcm');
-%spclab(fs, ssin, erfb, ercn, 'out0.pcm');
diff --git a/src/modules/audio_processing/aec/main/source/Android.mk b/src/modules/audio_processing/aec/main/source/Android.mk
deleted file mode 100644
index f16f26b..0000000
--- a/src/modules/audio_processing/aec/main/source/Android.mk
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_aec
-LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := \
-    echo_cancellation.c \
-    resampler.c \
-    aec_core.c \
-    aec_rdft.c 
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR'
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-else
-LOCAL_SRC_FILES += \
-    aec_core_sse2.c \
-    aec_rdft_sse2.c
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../utility \
-    $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/aec/main/source/aec_rdft.h b/src/modules/audio_processing/aec/main/source/aec_rdft.h
deleted file mode 100644
index cf90882..0000000
--- a/src/modules/audio_processing/aec/main/source/aec_rdft.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-// constants shared by all paths (C, SSE2).
-extern float rdft_w[64];
-
-// code path selection function pointers
-typedef void (*rft_sub_128_t)(float *a);
-extern rft_sub_128_t rftfsub_128;
-extern rft_sub_128_t rftbsub_128;
-
-// entry points
-void aec_rdft_init(void);
-void aec_rdft_init_sse2(void);
-void aec_rdft_forward_128(float *a);
-void aec_rdft_inverse_128(float *a);
diff --git a/src/modules/audio_processing/aec/main/source/aec_rdft_sse2.c b/src/modules/audio_processing/aec/main/source/aec_rdft_sse2.c
deleted file mode 100644
index 901a1b1..0000000
--- a/src/modules/audio_processing/aec/main/source/aec_rdft_sse2.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <emmintrin.h>
-
-#include "aec_rdft.h"
-
-#ifdef _MSC_VER /* visual c++ */
-# define ALIGN16_BEG __declspec(align(16))
-# define ALIGN16_END
-#else /* gcc or icc */
-# define ALIGN16_BEG
-# define ALIGN16_END __attribute__((aligned(16)))
-#endif
-
-static void rftfsub_128_SSE2(float *a) {
-  const float *c = rdft_w + 32;
-  int j1, j2, k1, k2;
-  float wkr, wki, xr, xi, yr, yi;
-
-  static const ALIGN16_BEG float ALIGN16_END k_half[4] =
-    {0.5f, 0.5f, 0.5f, 0.5f};
-  const __m128 mm_half = _mm_load_ps(k_half);
-
-  // Vectorized code (four at once).
-  //    Note: commented number are indexes for the first iteration of the loop.
-  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
-    // Load 'wk'.
-    const __m128 c_j1 = _mm_loadu_ps(&c[     j1]);         //  1,  2,  3,  4,
-    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);         // 28, 29, 30, 31,
-    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);         // 28, 29, 30, 31,
-    const __m128 wkr_ =
-      _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28,
-    const __m128 wki_ = c_j1;                              //  1,  2,  3,  4,
-    // Load and shuffle 'a'.
-    const __m128 a_j2_0 = _mm_loadu_ps(&a[0   + j2]);  //   2,   3,   4,   5,
-    const __m128 a_j2_4 = _mm_loadu_ps(&a[4   + j2]);  //   6,   7,   8,   9,
-    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
-    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
-    const __m128 a_j2_p0 = _mm_shuffle_ps(a_j2_0, a_j2_4,
-                            _MM_SHUFFLE(2, 0, 2 ,0));  //   2,   4,   6,   8,
-    const __m128 a_j2_p1 = _mm_shuffle_ps(a_j2_0, a_j2_4,
-                            _MM_SHUFFLE(3, 1, 3 ,1));  //   3,   5,   7,   9,
-    const __m128 a_k2_p0 = _mm_shuffle_ps(a_k2_4, a_k2_0,
-                            _MM_SHUFFLE(0, 2, 0 ,2));  // 126, 124, 122, 120,
-    const __m128 a_k2_p1 = _mm_shuffle_ps(a_k2_4, a_k2_0,
-                            _MM_SHUFFLE(1, 3, 1 ,3));  // 127, 125, 123, 121,
-    // Calculate 'x'.
-    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
-                                               // 2-126, 4-124, 6-122, 8-120,
-    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
-                                               // 3-127, 5-125, 7-123, 9-121,
-    // Calculate product into 'y'.
-    //    yr = wkr * xr - wki * xi;
-    //    yi = wkr * xi + wki * xr;
-    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
-    const __m128 b_ = _mm_mul_ps(wki_, xi_);
-    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
-    const __m128 d_ = _mm_mul_ps(wki_, xr_);
-    const __m128 yr_ = _mm_sub_ps(a_, b_);     // 2-126, 4-124, 6-122, 8-120,
-    const __m128 yi_ = _mm_add_ps(c_, d_);     // 3-127, 5-125, 7-123, 9-121,
-    // Update 'a'.
-    //    a[j2 + 0] -= yr;
-    //    a[j2 + 1] -= yi;
-    //    a[k2 + 0] += yr;
-    //    a[k2 + 1] -= yi;
-    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
-    const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_);  //   3,   5,   7,   9,
-    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
-    const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_);  // 127, 125, 123, 121,
-    // Shuffle in right order and store.
-    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
-                                                       //   2,   3,   4,   5,
-    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
-                                                       //   6,   7,   8,   9,
-    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
-                                                       // 122, 123, 120, 121,
-    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
-                                                       // 126, 127, 124, 125,
-    const __m128 a_k2_0n = _mm_shuffle_ps(a_k2_0nt, a_k2_0nt,
-                            _MM_SHUFFLE(1, 0, 3 ,2));  // 120, 121, 122, 123,
-    const __m128 a_k2_4n = _mm_shuffle_ps(a_k2_4nt, a_k2_4nt,
-                            _MM_SHUFFLE(1, 0, 3 ,2));  // 124, 125, 126, 127,
-    _mm_storeu_ps(&a[0   + j2], a_j2_0n);
-    _mm_storeu_ps(&a[4   + j2], a_j2_4n);
-    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
-    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
-  }
-  // Scalar code for the remaining items.
-  for (; j2 < 64; j1 += 1, j2 += 2) {
-    k2 = 128 - j2;
-    k1 =  32 - j1;
-    wkr = 0.5f - c[k1];
-    wki = c[j1];
-    xr = a[j2 + 0] - a[k2 + 0];
-    xi = a[j2 + 1] + a[k2 + 1];
-    yr = wkr * xr - wki * xi;
-    yi = wkr * xi + wki * xr;
-    a[j2 + 0] -= yr;
-    a[j2 + 1] -= yi;
-    a[k2 + 0] += yr;
-    a[k2 + 1] -= yi;
-  }
-}
-
-static void rftbsub_128_SSE2(float *a) {
-  const float *c = rdft_w + 32;
-  int j1, j2, k1, k2;
-  float wkr, wki, xr, xi, yr, yi;
-
-  static const ALIGN16_BEG float ALIGN16_END k_half[4] =
-    {0.5f, 0.5f, 0.5f, 0.5f};
-  const __m128 mm_half = _mm_load_ps(k_half);
-
-  a[1] = -a[1];
-  // Vectorized code (four at once).
-  //    Note: commented number are indexes for the first iteration of the loop.
-  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
-    // Load 'wk'.
-    const __m128 c_j1 = _mm_loadu_ps(&c[     j1]);         //  1,  2,  3,  4,
-    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);         // 28, 29, 30, 31,
-    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);         // 28, 29, 30, 31,
-    const __m128 wkr_ =
-      _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28,
-    const __m128 wki_ = c_j1;                              //  1,  2,  3,  4,
-    // Load and shuffle 'a'.
-    const __m128 a_j2_0 = _mm_loadu_ps(&a[0   + j2]);  //   2,   3,   4,   5,
-    const __m128 a_j2_4 = _mm_loadu_ps(&a[4   + j2]);  //   6,   7,   8,   9,
-    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
-    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
-    const __m128 a_j2_p0 = _mm_shuffle_ps(a_j2_0, a_j2_4,
-                            _MM_SHUFFLE(2, 0, 2 ,0));  //   2,   4,   6,   8,
-    const __m128 a_j2_p1 = _mm_shuffle_ps(a_j2_0, a_j2_4,
-                            _MM_SHUFFLE(3, 1, 3 ,1));  //   3,   5,   7,   9,
-    const __m128 a_k2_p0 = _mm_shuffle_ps(a_k2_4, a_k2_0,
-                            _MM_SHUFFLE(0, 2, 0 ,2));  // 126, 124, 122, 120,
-    const __m128 a_k2_p1 = _mm_shuffle_ps(a_k2_4, a_k2_0,
-                            _MM_SHUFFLE(1, 3, 1 ,3));  // 127, 125, 123, 121,
-    // Calculate 'x'.
-    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
-                                               // 2-126, 4-124, 6-122, 8-120,
-    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
-                                               // 3-127, 5-125, 7-123, 9-121,
-    // Calculate product into 'y'.
-    //    yr = wkr * xr + wki * xi;
-    //    yi = wkr * xi - wki * xr;
-    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
-    const __m128 b_ = _mm_mul_ps(wki_, xi_);
-    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
-    const __m128 d_ = _mm_mul_ps(wki_, xr_);
-    const __m128 yr_ = _mm_add_ps(a_, b_);     // 2-126, 4-124, 6-122, 8-120,
-    const __m128 yi_ = _mm_sub_ps(c_, d_);     // 3-127, 5-125, 7-123, 9-121,
-    // Update 'a'.
-    //    a[j2 + 0] = a[j2 + 0] - yr;
-    //    a[j2 + 1] = yi - a[j2 + 1];
-    //    a[k2 + 0] = yr + a[k2 + 0];
-    //    a[k2 + 1] = yi - a[k2 + 1];
-    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
-    const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1);  //   3,   5,   7,   9,
-    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
-    const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1);  // 127, 125, 123, 121,
-    // Shuffle in right order and store.
-    // Shuffle in right order and store.
-    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
-                                                       //   2,   3,   4,   5,
-    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
-                                                       //   6,   7,   8,   9,
-    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
-                                                       // 122, 123, 120, 121,
-    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
-                                                       // 126, 127, 124, 125,
-    const __m128 a_k2_0n = _mm_shuffle_ps(a_k2_0nt, a_k2_0nt,
-                            _MM_SHUFFLE(1, 0, 3 ,2));  // 120, 121, 122, 123,
-    const __m128 a_k2_4n = _mm_shuffle_ps(a_k2_4nt, a_k2_4nt,
-                            _MM_SHUFFLE(1, 0, 3 ,2));  // 124, 125, 126, 127,
-    _mm_storeu_ps(&a[0   + j2], a_j2_0n);
-    _mm_storeu_ps(&a[4   + j2], a_j2_4n);
-    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
-    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
-  }
-  // Scalar code for the remaining items.
-  for (; j2 < 64; j1 += 1, j2 += 2) {
-    k2 = 128 - j2;
-    k1 =  32 - j1;
-    wkr = 0.5f - c[k1];
-    wki = c[j1];
-    xr = a[j2 + 0] - a[k2 + 0];
-    xi = a[j2 + 1] + a[k2 + 1];
-    yr = wkr * xr + wki * xi;
-    yi = wkr * xi - wki * xr;
-    a[j2 + 0] = a[j2 + 0] - yr;
-    a[j2 + 1] = yi - a[j2 + 1];
-    a[k2 + 0] = yr + a[k2 + 0];
-    a[k2 + 1] = yi - a[k2 + 1];
-  }
-  a[65] = -a[65];
-}
-
-void aec_rdft_init_sse2(void) {
-  rftfsub_128 = rftfsub_128_SSE2;
-  rftbsub_128 = rftbsub_128_SSE2;
-}
diff --git a/src/modules/audio_processing/aec/main/source/echo_cancellation.c b/src/modules/audio_processing/aec/main/source/echo_cancellation.c
deleted file mode 100644
index 1313e35..0000000
--- a/src/modules/audio_processing/aec/main/source/echo_cancellation.c
+++ /dev/null
@@ -1,821 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * Contains the API functions for the AEC.
- */
-#include <stdlib.h>
-#include <string.h>
-
-#include "echo_cancellation.h"
-#include "aec_core.h"
-#include "ring_buffer.h"
-#include "resampler.h"
-#ifdef AEC_DEBUG
-    #include <stdio.h>
-#endif
-
-#define BUF_SIZE_FRAMES 50 // buffer size (frames)
-// Maximum length of resampled signal. Must be an integer multiple of frames
-// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN
-// The factor of 2 handles wb, and the + 1 is as a safety margin
-#define MAX_RESAMP_LEN (5 * FRAME_LEN)
-
-static const int bufSizeSamp = BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples)
-static const int sampMsNb = 8; // samples per ms in nb
-// Target suppression levels for nlp modes
-// log{0.001, 0.00001, 0.00000001}
-static const float targetSupp[3] = {-6.9f, -11.5f, -18.4f};
-static const float minOverDrive[3] = {1.0f, 2.0f, 5.0f};
-static const int initCheck = 42;
-
-typedef struct {
-    int delayCtr;
-    int sampFreq;
-    int splitSampFreq;
-    int scSampFreq;
-    float sampFactor; // scSampRate / sampFreq
-    short nlpMode;
-    short autoOnOff;
-    short activity;
-    short skewMode;
-    short bufSizeStart;
-    //short bufResetCtr;  // counts number of noncausal frames
-    int knownDelay;
-
-    // Stores the last frame added to the farend buffer
-    short farendOld[2][FRAME_LEN];
-    short initFlag; // indicates if AEC has been initialized
-
-    // Variables used for averaging far end buffer size
-    short counter;
-    short sum;
-    short firstVal;
-    short checkBufSizeCtr;
-
-    // Variables used for delay shifts
-    short msInSndCardBuf;
-    short filtDelay;
-    int timeForDelayChange;
-    int ECstartup;
-    int checkBuffSize;
-    int delayChange;
-    short lastDelayDiff;
-
-#ifdef AEC_DEBUG
-    FILE *bufFile;
-    FILE *delayFile;
-    FILE *skewFile;
-    FILE *preCompFile;
-    FILE *postCompFile;
-#endif // AEC_DEBUG
-
-    // Structures
-    void *farendBuf;
-    void *resampler;
-
-    int skewFrCtr;
-    int resample; // if the skew is small enough we don't resample
-    int highSkewCtr;
-    float skew;
-
-    int lastError;
-
-    aec_t *aec;
-} aecpc_t;
-
-// Estimates delay to set the position of the farend buffer read pointer
-// (controlled by knownDelay)
-static int EstBufDelay(aecpc_t *aecInst, short msInSndCardBuf);
-
-// Stuffs the farend buffer if the estimated delay is too large
-static int DelayComp(aecpc_t *aecInst);
-
-WebRtc_Word32 WebRtcAec_Create(void **aecInst)
-{
-    aecpc_t *aecpc;
-    if (aecInst == NULL) {
-        return -1;
-    }
-
-    aecpc = malloc(sizeof(aecpc_t));
-    *aecInst = aecpc;
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (WebRtcAec_CreateAec(&aecpc->aec) == -1) {
-        WebRtcAec_Free(aecpc);
-        aecpc = NULL;
-        return -1;
-    }
-
-    if (WebRtcApm_CreateBuffer(&aecpc->farendBuf, bufSizeSamp) == -1) {
-        WebRtcAec_Free(aecpc);
-        aecpc = NULL;
-        return -1;
-    }
-
-    if (WebRtcAec_CreateResampler(&aecpc->resampler) == -1) {
-        WebRtcAec_Free(aecpc);
-        aecpc = NULL;
-        return -1;
-    }
-
-    aecpc->initFlag = 0;
-    aecpc->lastError = 0;
-
-#ifdef AEC_DEBUG
-    aecpc->aec->farFile = fopen("aecFar.pcm","wb");
-    aecpc->aec->nearFile = fopen("aecNear.pcm","wb");
-    aecpc->aec->outFile = fopen("aecOut.pcm","wb");
-    aecpc->aec->outLpFile = fopen("aecOutLp.pcm","wb");
-
-    aecpc->bufFile = fopen("aecBuf.dat", "wb");
-    aecpc->skewFile = fopen("aecSkew.dat", "wb");
-    aecpc->delayFile = fopen("aecDelay.dat", "wb");
-    aecpc->preCompFile = fopen("preComp.pcm", "wb");
-    aecpc->postCompFile = fopen("postComp.pcm", "wb");
-#endif // AEC_DEBUG
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_Free(void *aecInst)
-{
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-#ifdef AEC_DEBUG
-    fclose(aecpc->aec->farFile);
-    fclose(aecpc->aec->nearFile);
-    fclose(aecpc->aec->outFile);
-    fclose(aecpc->aec->outLpFile);
-
-    fclose(aecpc->bufFile);
-    fclose(aecpc->skewFile);
-    fclose(aecpc->delayFile);
-    fclose(aecpc->preCompFile);
-    fclose(aecpc->postCompFile);
-#endif // AEC_DEBUG
-
-    WebRtcAec_FreeAec(aecpc->aec);
-    WebRtcApm_FreeBuffer(aecpc->farendBuf);
-    WebRtcAec_FreeResampler(aecpc->resampler);
-    free(aecpc);
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_Init(void *aecInst, WebRtc_Word32 sampFreq, WebRtc_Word32 scSampFreq)
-{
-    aecpc_t *aecpc = aecInst;
-    AecConfig aecConfig;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (sampFreq != 8000 && sampFreq != 16000  && sampFreq != 32000) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecpc->sampFreq = sampFreq;
-
-    if (scSampFreq < 1 || scSampFreq > 96000) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecpc->scSampFreq = scSampFreq;
-
-    // Initialize echo canceller core
-    if (WebRtcAec_InitAec(aecpc->aec, aecpc->sampFreq) == -1) {
-        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
-        return -1;
-    }
-
-    // Initialize farend buffer
-    if (WebRtcApm_InitBuffer(aecpc->farendBuf) == -1) {
-        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
-        return -1;
-    }
-
-    if (WebRtcAec_InitResampler(aecpc->resampler, aecpc->scSampFreq) == -1) {
-        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
-        return -1;
-    }
-
-    aecpc->initFlag = initCheck;  // indicates that initilisation has been done
-
-    if (aecpc->sampFreq == 32000) {
-        aecpc->splitSampFreq = 16000;
-    }
-    else {
-        aecpc->splitSampFreq = sampFreq;
-    }
-
-    aecpc->skewFrCtr = 0;
-    aecpc->activity = 0;
-
-    aecpc->delayChange = 1;
-    aecpc->delayCtr = 0;
-
-    aecpc->sum = 0;
-    aecpc->counter = 0;
-    aecpc->checkBuffSize = 1;
-    aecpc->firstVal = 0;
-
-    aecpc->ECstartup = 1;
-    aecpc->bufSizeStart = 0;
-    aecpc->checkBufSizeCtr = 0;
-    aecpc->filtDelay = 0;
-    aecpc->timeForDelayChange =0;
-    aecpc->knownDelay = 0;
-    aecpc->lastDelayDiff = 0;
-
-    aecpc->skew = 0;
-    aecpc->resample = kAecFalse;
-    aecpc->highSkewCtr = 0;
-    aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq;
-
-    memset(&aecpc->farendOld[0][0], 0, 160);
-
-    // Default settings.
-    aecConfig.nlpMode = kAecNlpModerate;
-    aecConfig.skewMode = kAecFalse;
-    aecConfig.metricsMode = kAecFalse;
-
-    if (WebRtcAec_set_config(aecpc, aecConfig) == -1) {
-        aecpc->lastError = AEC_UNSPECIFIED_ERROR;
-        return -1;
-    }
-
-    return 0;
-}
-
-// only buffer L band for farend
-WebRtc_Word32 WebRtcAec_BufferFarend(void *aecInst, const WebRtc_Word16 *farend,
-    WebRtc_Word16 nrOfSamples)
-{
-    aecpc_t *aecpc = aecInst;
-    WebRtc_Word32 retVal = 0;
-    short newNrOfSamples;
-    short newFarend[MAX_RESAMP_LEN];
-    float skew;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (farend == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    // number of samples == 160 for SWB input
-    if (nrOfSamples != 80 && nrOfSamples != 160) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-
-    skew = aecpc->skew;
-
-    // TODO: Is this really a good idea?
-    if (!aecpc->ECstartup) {
-        DelayComp(aecpc);
-    }
-
-    if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
-        // Resample and get a new number of samples
-        newNrOfSamples = WebRtcAec_ResampleLinear(aecpc->resampler,
-                                                  farend,
-                                                  nrOfSamples,
-                                                  skew,
-                                                  newFarend);
-        WebRtcApm_WriteBuffer(aecpc->farendBuf, newFarend, newNrOfSamples);
-
-#ifdef AEC_DEBUG
-        fwrite(farend, 2, nrOfSamples, aecpc->preCompFile);
-        fwrite(newFarend, 2, newNrOfSamples, aecpc->postCompFile);
-#endif
-    }
-    else {
-        WebRtcApm_WriteBuffer(aecpc->farendBuf, farend, nrOfSamples);
-    }
-
-    return retVal;
-}
-
-WebRtc_Word32 WebRtcAec_Process(void *aecInst, const WebRtc_Word16 *nearend,
-    const WebRtc_Word16 *nearendH, WebRtc_Word16 *out, WebRtc_Word16 *outH,
-    WebRtc_Word16 nrOfSamples, WebRtc_Word16 msInSndCardBuf, WebRtc_Word32 skew)
-{
-    aecpc_t *aecpc = aecInst;
-    WebRtc_Word32 retVal = 0;
-    short i;
-    short farend[FRAME_LEN];
-    short nmbrOfFilledBuffers;
-    short nBlocks10ms;
-    short nFrames;
-#ifdef AEC_DEBUG
-    short msInAECBuf;
-#endif
-    // Limit resampling to doubling/halving of signal
-    const float minSkewEst = -0.5f;
-    const float maxSkewEst = 1.0f;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (nearend == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (out == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    // number of samples == 160 for SWB input
-    if (nrOfSamples != 80 && nrOfSamples != 160) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-
-    // Check for valid pointers based on sampling rate
-    if (aecpc->sampFreq == 32000 && nearendH == NULL) {
-       aecpc->lastError = AEC_NULL_POINTER_ERROR;
-       return -1;
-    }
-
-    if (msInSndCardBuf < 0) {
-        msInSndCardBuf = 0;
-        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
-        retVal = -1;
-    }
-    else if (msInSndCardBuf > 500) {
-        msInSndCardBuf = 500;
-        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
-        retVal = -1;
-    }
-    msInSndCardBuf += 10;
-    aecpc->msInSndCardBuf = msInSndCardBuf;
-
-    if (aecpc->skewMode == kAecTrue) {
-        if (aecpc->skewFrCtr < 25) {
-            aecpc->skewFrCtr++;
-        }
-        else {
-            retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
-            if (retVal == -1) {
-                aecpc->skew = 0;
-                aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
-            }
-
-            aecpc->skew /= aecpc->sampFactor*nrOfSamples;
-
-            if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) {
-                aecpc->resample = kAecFalse;
-            }
-            else {
-                aecpc->resample = kAecTrue;
-            }
-
-            if (aecpc->skew < minSkewEst) {
-                aecpc->skew = minSkewEst;
-            }
-            else if (aecpc->skew > maxSkewEst) {
-                aecpc->skew = maxSkewEst;
-            }
-
-#ifdef AEC_DEBUG
-            fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
-#endif
-        }
-    }
-
-    nFrames = nrOfSamples / FRAME_LEN;
-    nBlocks10ms = nFrames / aecpc->aec->mult;
-
-    if (aecpc->ECstartup) {
-        memcpy(out, nearend, sizeof(short) * nrOfSamples);
-        nmbrOfFilledBuffers = WebRtcApm_get_buffer_size(aecpc->farendBuf) / FRAME_LEN;
-
-        // The AEC is in the start up mode
-        // AEC is disabled until the soundcard buffer and farend buffers are OK
-
-        // Mechanism to ensure that the soundcard buffer is reasonably stable.
-        if (aecpc->checkBuffSize) {
-
-            aecpc->checkBufSizeCtr++;
-            // Before we fill up the far end buffer we require the amount of data on the
-            // sound card to be stable (+/-8 ms) compared to the first value. This
-            // comparison is made during the following 4 consecutive frames. If it seems
-            // to be stable then we start to fill up the far end buffer.
-
-            if (aecpc->counter == 0) {
-                aecpc->firstVal = aecpc->msInSndCardBuf;
-                aecpc->sum = 0;
-            }
-
-            if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) <
-                WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) {
-                aecpc->sum += aecpc->msInSndCardBuf;
-                aecpc->counter++;
-            }
-            else {
-                aecpc->counter = 0;
-            }
-
-            if (aecpc->counter*nBlocks10ms >= 6) {
-                // The farend buffer size is determined in blocks of 80 samples
-                // Use 75% of the average value of the soundcard buffer
-                aecpc->bufSizeStart = WEBRTC_SPL_MIN((int) (0.75 * (aecpc->sum *
-                    aecpc->aec->mult) / (aecpc->counter * 10)), BUF_SIZE_FRAMES);
-                // buffersize has now been determined
-                aecpc->checkBuffSize = 0;
-            }
-
-            if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) {
-                // for really bad sound cards, don't disable echocanceller for more than 0.5 sec
-                aecpc->bufSizeStart = WEBRTC_SPL_MIN((int) (0.75 * (aecpc->msInSndCardBuf *
-                    aecpc->aec->mult) / 10), BUF_SIZE_FRAMES);
-                aecpc->checkBuffSize = 0;
-            }
-        }
-
-        // if checkBuffSize changed in the if-statement above
-        if (!aecpc->checkBuffSize) {
-            // soundcard buffer is now reasonably stable
-            // When the far end buffer is filled with approximately the same amount of
-            // data as the amount on the sound card we end the start up phase and start
-            // to cancel echoes.
-
-            if (nmbrOfFilledBuffers == aecpc->bufSizeStart) {
-                aecpc->ECstartup = 0;  // Enable the AEC
-            }
-            else if (nmbrOfFilledBuffers > aecpc->bufSizeStart) {
-                WebRtcApm_FlushBuffer(aecpc->farendBuf, WebRtcApm_get_buffer_size(aecpc->farendBuf) -
-                    aecpc->bufSizeStart * FRAME_LEN);
-                aecpc->ECstartup = 0;
-            }
-        }
-
-    }
-    else {
-        // AEC is enabled
-
-        // Note only 1 block supported for nb and 2 blocks for wb
-        for (i = 0; i < nFrames; i++) {
-            nmbrOfFilledBuffers = WebRtcApm_get_buffer_size(aecpc->farendBuf) / FRAME_LEN;
-
-            // Check that there is data in the far end buffer
-            if (nmbrOfFilledBuffers > 0) {
-                // Get the next 80 samples from the farend buffer
-                WebRtcApm_ReadBuffer(aecpc->farendBuf, farend, FRAME_LEN);
-
-                // Always store the last frame for use when we run out of data
-                memcpy(&(aecpc->farendOld[i][0]), farend, FRAME_LEN * sizeof(short));
-            }
-            else {
-                // We have no data so we use the last played frame
-                memcpy(farend, &(aecpc->farendOld[i][0]), FRAME_LEN * sizeof(short));
-            }
-
-            // Call buffer delay estimator when all data is extracted,
-            // i.e. i = 0 for NB and i = 1 for WB or SWB
-            if ((i == 0 && aecpc->splitSampFreq == 8000) ||
-                    (i == 1 && (aecpc->splitSampFreq == 16000))) {
-                EstBufDelay(aecpc, aecpc->msInSndCardBuf);
-            }
-
-            // Call the AEC
-           WebRtcAec_ProcessFrame(aecpc->aec, farend, &nearend[FRAME_LEN * i], &nearendH[FRAME_LEN * i],
-               &out[FRAME_LEN * i], &outH[FRAME_LEN * i], aecpc->knownDelay);
-        }
-    }
-
-#ifdef AEC_DEBUG
-    msInAECBuf = WebRtcApm_get_buffer_size(aecpc->farendBuf) / (sampMsNb*aecpc->aec->mult);
-    fwrite(&msInAECBuf, 2, 1, aecpc->bufFile);
-    fwrite(&(aecpc->knownDelay), sizeof(aecpc->knownDelay), 1, aecpc->delayFile);
-#endif
-
-    return retVal;
-}
-
-WebRtc_Word32 WebRtcAec_set_config(void *aecInst, AecConfig config)
-{
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecpc->skewMode = config.skewMode;
-
-    if (config.nlpMode != kAecNlpConservative && config.nlpMode !=
-            kAecNlpModerate && config.nlpMode != kAecNlpAggressive) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecpc->nlpMode = config.nlpMode;
-    aecpc->aec->targetSupp = targetSupp[aecpc->nlpMode];
-    aecpc->aec->minOverDrive = minOverDrive[aecpc->nlpMode];
-
-    if (config.metricsMode != kAecFalse && config.metricsMode != kAecTrue) {
-        aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecpc->aec->metricsMode = config.metricsMode;
-    if (aecpc->aec->metricsMode == kAecTrue) {
-        WebRtcAec_InitMetrics(aecpc->aec);
-    }
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_get_config(void *aecInst, AecConfig *config)
-{
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (config == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    config->nlpMode = aecpc->nlpMode;
-    config->skewMode = aecpc->skewMode;
-    config->metricsMode = aecpc->aec->metricsMode;
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_get_echo_status(void *aecInst, WebRtc_Word16 *status)
-{
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (status == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    *status = aecpc->aec->echoState;
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_GetMetrics(void *aecInst, AecMetrics *metrics)
-{
-    const float upweight = 0.7f;
-    float dtmp;
-    short stmp;
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    if (metrics == NULL) {
-        aecpc->lastError = AEC_NULL_POINTER_ERROR;
-        return -1;
-    }
-
-    if (aecpc->initFlag != initCheck) {
-        aecpc->lastError = AEC_UNINITIALIZED_ERROR;
-        return -1;
-    }
-
-    // ERL
-    metrics->erl.instant = (short) aecpc->aec->erl.instant;
-
-    if ((aecpc->aec->erl.himean > offsetLevel) && (aecpc->aec->erl.average > offsetLevel)) {
-    // Use a mix between regular average and upper part average
-        dtmp = upweight * aecpc->aec->erl.himean + (1 - upweight) * aecpc->aec->erl.average;
-        metrics->erl.average = (short) dtmp;
-    }
-    else {
-        metrics->erl.average = offsetLevel;
-    }
-
-    metrics->erl.max = (short) aecpc->aec->erl.max;
-
-    if (aecpc->aec->erl.min < (offsetLevel * (-1))) {
-        metrics->erl.min = (short) aecpc->aec->erl.min;
-    }
-    else {
-        metrics->erl.min = offsetLevel;
-    }
-
-    // ERLE
-    metrics->erle.instant = (short) aecpc->aec->erle.instant;
-
-    if ((aecpc->aec->erle.himean > offsetLevel) && (aecpc->aec->erle.average > offsetLevel)) {
-        // Use a mix between regular average and upper part average
-        dtmp =  upweight * aecpc->aec->erle.himean + (1 - upweight) * aecpc->aec->erle.average;
-        metrics->erle.average = (short) dtmp;
-    }
-    else {
-        metrics->erle.average = offsetLevel;
-    }
-
-    metrics->erle.max = (short) aecpc->aec->erle.max;
-
-    if (aecpc->aec->erle.min < (offsetLevel * (-1))) {
-        metrics->erle.min = (short) aecpc->aec->erle.min;
-    } else {
-        metrics->erle.min = offsetLevel;
-    }
-
-    // RERL
-    if ((metrics->erl.average > offsetLevel) && (metrics->erle.average > offsetLevel)) {
-        stmp = metrics->erl.average + metrics->erle.average;
-    }
-    else {
-        stmp = offsetLevel;
-    }
-    metrics->rerl.average = stmp;
-
-    // No other statistics needed, but returned for completeness
-    metrics->rerl.instant = stmp;
-    metrics->rerl.max = stmp;
-    metrics->rerl.min = stmp;
-
-    // A_NLP
-    metrics->aNlp.instant = (short) aecpc->aec->aNlp.instant;
-
-    if ((aecpc->aec->aNlp.himean > offsetLevel) && (aecpc->aec->aNlp.average > offsetLevel)) {
-        // Use a mix between regular average and upper part average
-        dtmp =  upweight * aecpc->aec->aNlp.himean + (1 - upweight) * aecpc->aec->aNlp.average;
-        metrics->aNlp.average = (short) dtmp;
-    }
-    else {
-        metrics->aNlp.average = offsetLevel;
-    }
-
-    metrics->aNlp.max = (short) aecpc->aec->aNlp.max;
-
-    if (aecpc->aec->aNlp.min < (offsetLevel * (-1))) {
-        metrics->aNlp.min = (short) aecpc->aec->aNlp.min;
-    }
-    else {
-        metrics->aNlp.min = offsetLevel;
-    }
-
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_get_version(WebRtc_Word8 *versionStr, WebRtc_Word16 len)
-{
-    const char version[] = "AEC 2.5.0";
-    const short versionLen = (short)strlen(version) + 1; // +1 for null-termination
-
-    if (versionStr == NULL) {
-        return -1;
-    }
-
-    if (versionLen > len) {
-        return -1;
-    }
-
-    strncpy(versionStr, version, versionLen);
-    return 0;
-}
-
-WebRtc_Word32 WebRtcAec_get_error_code(void *aecInst)
-{
-    aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
-    return aecpc->lastError;
-}
-
-static int EstBufDelay(aecpc_t *aecpc, short msInSndCardBuf)
-{
-    short delayNew, nSampFar, nSampSndCard;
-    short diff;
-
-    nSampFar = WebRtcApm_get_buffer_size(aecpc->farendBuf);
-    nSampSndCard = msInSndCardBuf * sampMsNb * aecpc->aec->mult;
-
-    delayNew = nSampSndCard - nSampFar;
-
-    // Account for resampling frame delay
-    if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
-        delayNew -= kResamplingDelay;
-    }
-
-    if (delayNew < FRAME_LEN) {
-        WebRtcApm_FlushBuffer(aecpc->farendBuf, FRAME_LEN);
-        delayNew += FRAME_LEN;
-    }
-
-    aecpc->filtDelay = WEBRTC_SPL_MAX(0, (short)(0.8*aecpc->filtDelay + 0.2*delayNew));
-
-    diff = aecpc->filtDelay - aecpc->knownDelay;
-    if (diff > 224) {
-        if (aecpc->lastDelayDiff < 96) {
-            aecpc->timeForDelayChange = 0;
-        }
-        else {
-            aecpc->timeForDelayChange++;
-        }
-    }
-    else if (diff < 96 && aecpc->knownDelay > 0) {
-        if (aecpc->lastDelayDiff > 224) {
-            aecpc->timeForDelayChange = 0;
-        }
-        else {
-            aecpc->timeForDelayChange++;
-        }
-    }
-    else {
-        aecpc->timeForDelayChange = 0;
-    }
-    aecpc->lastDelayDiff = diff;
-
-    if (aecpc->timeForDelayChange > 25) {
-        aecpc->knownDelay = WEBRTC_SPL_MAX((int)aecpc->filtDelay - 160, 0);
-    }
-    return 0;
-}
-
-static int DelayComp(aecpc_t *aecpc)
-{
-    int nSampFar, nSampSndCard, delayNew, nSampAdd;
-    const int maxStuffSamp = 10 * FRAME_LEN;
-
-    nSampFar = WebRtcApm_get_buffer_size(aecpc->farendBuf);
-    nSampSndCard = aecpc->msInSndCardBuf * sampMsNb * aecpc->aec->mult;
-    delayNew = nSampSndCard - nSampFar;
-
-    // Account for resampling frame delay
-    if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
-        delayNew -= kResamplingDelay;
-    }
-
-    if (delayNew > FAR_BUF_LEN - FRAME_LEN*aecpc->aec->mult) {
-        // The difference of the buffersizes is larger than the maximum
-        // allowed known delay. Compensate by stuffing the buffer.
-        nSampAdd = (int)(WEBRTC_SPL_MAX((int)(0.5 * nSampSndCard - nSampFar),
-                    FRAME_LEN));
-        nSampAdd = WEBRTC_SPL_MIN(nSampAdd, maxStuffSamp);
-
-        WebRtcApm_StuffBuffer(aecpc->farendBuf, nSampAdd);
-        aecpc->delayChange = 1; // the delay needs to be updated
-    }
-
-    return 0;
-}
diff --git a/src/modules/audio_processing/aecm/Android.mk b/src/modules/audio_processing/aecm/Android.mk
new file mode 100644
index 0000000..10c38ca
--- /dev/null
+++ b/src/modules/audio_processing/aecm/Android.mk
@@ -0,0 +1,78 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+#############################
+# Build the non-neon library.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_aecm
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := \
+    echo_control_mobile.c \
+    aecm_core.c
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../utility \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include \
+    $(LOCAL_PATH)/../../../system_wrappers/interface
+
+LOCAL_STATIC_LIBRARIES += libwebrtc_system_wrappers
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
+
+#########################
+# Build the neon library.
+ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_aecm_neon
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := aecm_core_neon.c
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS) \
+    -mfpu=neon \
+    -mfloat-abi=softfp \
+    -flax-vector-conversions
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
+
+endif # ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
diff --git a/src/modules/audio_processing/aecm/main/source/aecm.gyp b/src/modules/audio_processing/aecm/aecm.gypi
similarity index 64%
rename from src/modules/audio_processing/aecm/main/source/aecm.gyp
rename to src/modules/audio_processing/aecm/aecm.gypi
index a535d2b..bf520bf 100644
--- a/src/modules/audio_processing/aecm/main/source/aecm.gyp
+++ b/src/modules/audio_processing/aecm/aecm.gypi
@@ -7,27 +7,25 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../../common_settings.gypi',
-  ],
   'targets': [
     {
       'target_name': 'aecm',
       'type': '<(library)',
       'dependencies': [
-        '../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
-        '../../../utility/util.gyp:apm_util'
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+        'apm_util'
       ],
       'include_dirs': [
-        '../interface',
+        'interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'interface',
         ],
       },
       'sources': [
-        '../interface/echo_control_mobile.h',
+        'interface/echo_control_mobile.h',
         'echo_control_mobile.c',
         'aecm_core.c',
         'aecm_core.h',
@@ -35,9 +33,3 @@
     },
   ],
 }
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/aecm/aecm_core.c b/src/modules/audio_processing/aecm/aecm_core.c
new file mode 100644
index 0000000..9bf5c4a
--- /dev/null
+++ b/src/modules/audio_processing/aecm/aecm_core.c
@@ -0,0 +1,2126 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "aecm_core.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "cpu_features_wrapper.h"
+#include "delay_estimator_wrapper.h"
+#include "echo_control_mobile.h"
+#include "ring_buffer.h"
+#include "typedefs.h"
+
+#ifdef ARM_WINM_LOG
+#include <stdio.h>
+#include <windows.h>
+#endif
+
+#ifdef AEC_DEBUG
+FILE *dfile;
+FILE *testfile;
+#endif
+
+#ifdef _MSC_VER // visual c++
+#define ALIGN8_BEG __declspec(align(8))
+#define ALIGN8_END
+#else // gcc or icc
+#define ALIGN8_BEG
+#define ALIGN8_END __attribute__((aligned(8)))
+#endif
+
+#ifdef AECM_SHORT
+
+// Square root of Hanning window in Q14
+const WebRtc_Word16 WebRtcAecm_kSqrtHanning[] =
+{
+    0, 804, 1606, 2404, 3196, 3981, 4756, 5520,
+    6270, 7005, 7723, 8423, 9102, 9760, 10394, 11003,
+    11585, 12140, 12665, 13160, 13623, 14053, 14449, 14811,
+    15137, 15426, 15679, 15893, 16069, 16207, 16305, 16364,
+    16384
+};
+
+#else
+
+// Square root of Hanning window in Q14
+const ALIGN8_BEG WebRtc_Word16 WebRtcAecm_kSqrtHanning[] ALIGN8_END =
+{
+    0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172,
+    3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224, 6591, 6954, 7313, 7668, 8019, 8364,
+    8705, 9040, 9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514, 11795, 12068, 12335,
+    12594, 12845, 13089, 13325, 13553, 13773, 13985, 14189, 14384, 14571, 14749, 14918,
+    15079, 15231, 15373, 15506, 15631, 15746, 15851, 15947, 16034, 16111, 16179, 16237,
+    16286, 16325, 16354, 16373, 16384
+};
+
+#endif
+
+//Q15 alpha = 0.99439986968132  const Factor for magnitude approximation
+static const WebRtc_UWord16 kAlpha1 = 32584;
+//Q15 beta = 0.12967166976970   const Factor for magnitude approximation
+static const WebRtc_UWord16 kBeta1 = 4249;
+//Q15 alpha = 0.94234827210087  const Factor for magnitude approximation
+static const WebRtc_UWord16 kAlpha2 = 30879;
+//Q15 beta = 0.33787806009150   const Factor for magnitude approximation
+static const WebRtc_UWord16 kBeta2 = 11072;
+//Q15 alpha = 0.82247698684306  const Factor for magnitude approximation
+static const WebRtc_UWord16 kAlpha3 = 26951;
+//Q15 beta = 0.57762063060713   const Factor for magnitude approximation
+static const WebRtc_UWord16 kBeta3 = 18927;
+
+// Initialization table for echo channel in 8 kHz
+static const WebRtc_Word16 kChannelStored8kHz[PART_LEN1] = {
+    2040,   1815,   1590,   1498,   1405,   1395,   1385,   1418,
+    1451,   1506,   1562,   1644,   1726,   1804,   1882,   1918,
+    1953,   1982,   2010,   2025,   2040,   2034,   2027,   2021,
+    2014,   1997,   1980,   1925,   1869,   1800,   1732,   1683,
+    1635,   1604,   1572,   1545,   1517,   1481,   1444,   1405,
+    1367,   1331,   1294,   1270,   1245,   1239,   1233,   1247,
+    1260,   1282,   1303,   1338,   1373,   1407,   1441,   1470,
+    1499,   1524,   1549,   1565,   1582,   1601,   1621,   1649,
+    1676
+};
+
+// Initialization table for echo channel in 16 kHz
+static const WebRtc_Word16 kChannelStored16kHz[PART_LEN1] = {
+    2040,   1590,   1405,   1385,   1451,   1562,   1726,   1882,
+    1953,   2010,   2040,   2027,   2014,   1980,   1869,   1732,
+    1635,   1572,   1517,   1444,   1367,   1294,   1245,   1233,
+    1260,   1303,   1373,   1441,   1499,   1549,   1582,   1621,
+    1676,   1741,   1802,   1861,   1921,   1983,   2040,   2102,
+    2170,   2265,   2375,   2515,   2651,   2781,   2922,   3075,
+    3253,   3471,   3738,   3976,   4151,   4258,   4308,   4288,
+    4270,   4253,   4237,   4179,   4086,   3947,   3757,   3484,
+    3153
+};
+
+static const WebRtc_Word16 kCosTable[] = {
+    8192,  8190,  8187,  8180,  8172,  8160,  8147,  8130,  8112,
+    8091,  8067,  8041,  8012,  7982,  7948,  7912,  7874,  7834,
+    7791,  7745,  7697,  7647,  7595,  7540,  7483,  7424,  7362,
+    7299,  7233,  7164,  7094,  7021,  6947,  6870,  6791,  6710,
+    6627,  6542,  6455,  6366,  6275,  6182,  6087,  5991,  5892,
+    5792,  5690,  5586,  5481,  5374,  5265,  5155,  5043,  4930,
+    4815,  4698,  4580,  4461,  4341,  4219,  4096,  3971,  3845,
+    3719,  3591,  3462,  3331,  3200,  3068,  2935,  2801,  2667,
+    2531,  2395,  2258,  2120,  1981,  1842,  1703,  1563,  1422,
+    1281,  1140,   998,   856,   713,   571,   428,   285,   142,
+       0,  -142,  -285,  -428,  -571,  -713,  -856,  -998, -1140,
+   -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395,
+   -2531, -2667, -2801, -2935, -3068, -3200, -3331, -3462, -3591,
+   -3719, -3845, -3971, -4095, -4219, -4341, -4461, -4580, -4698,
+   -4815, -4930, -5043, -5155, -5265, -5374, -5481, -5586, -5690,
+   -5792, -5892, -5991, -6087, -6182, -6275, -6366, -6455, -6542,
+   -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164, -7233,
+   -7299, -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745,
+   -7791, -7834, -7874, -7912, -7948, -7982, -8012, -8041, -8067,
+   -8091, -8112, -8130, -8147, -8160, -8172, -8180, -8187, -8190,
+   -8191, -8190, -8187, -8180, -8172, -8160, -8147, -8130, -8112,
+   -8091, -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834,
+   -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362,
+   -7299, -7233, -7164, -7094, -7021, -6947, -6870, -6791, -6710,
+   -6627, -6542, -6455, -6366, -6275, -6182, -6087, -5991, -5892,
+   -5792, -5690, -5586, -5481, -5374, -5265, -5155, -5043, -4930,
+   -4815, -4698, -4580, -4461, -4341, -4219, -4096, -3971, -3845,
+   -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801, -2667,
+   -2531, -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422,
+   -1281, -1140,  -998,  -856,  -713,  -571,  -428,  -285,  -142,
+       0,   142,   285,   428,   571,   713,   856,   998,  1140,
+    1281,  1422,  1563,  1703,  1842,  1981,  2120,  2258,  2395,
+    2531,  2667,  2801,  2935,  3068,  3200,  3331,  3462,  3591,
+    3719,  3845,  3971,  4095,  4219,  4341,  4461,  4580,  4698,
+    4815,  4930,  5043,  5155,  5265,  5374,  5481,  5586,  5690,
+    5792,  5892,  5991,  6087,  6182,  6275,  6366,  6455,  6542,
+    6627,  6710,  6791,  6870,  6947,  7021,  7094,  7164,  7233,
+    7299,  7362,  7424,  7483,  7540,  7595,  7647,  7697,  7745,
+    7791,  7834,  7874,  7912,  7948,  7982,  8012,  8041,  8067,
+    8091,  8112,  8130,  8147,  8160,  8172,  8180,  8187,  8190
+};
+
+static const WebRtc_Word16 kSinTable[] = {
+       0,    142,    285,    428,    571,    713,    856,    998,
+    1140,   1281,   1422,   1563,   1703,   1842,   1981,   2120,
+    2258,   2395,   2531,   2667,   2801,   2935,   3068,   3200,
+    3331,   3462,   3591,   3719,   3845,   3971,   4095,   4219,
+    4341,   4461,   4580,   4698,   4815,   4930,   5043,   5155,
+    5265,   5374,   5481,   5586,   5690,   5792,   5892,   5991,
+    6087,   6182,   6275,   6366,   6455,   6542,   6627,   6710,
+    6791,   6870,   6947,   7021,   7094,   7164,   7233,   7299,
+    7362,   7424,   7483,   7540,   7595,   7647,   7697,   7745,
+    7791,   7834,   7874,   7912,   7948,   7982,   8012,   8041,
+    8067,   8091,   8112,   8130,   8147,   8160,   8172,   8180,
+    8187,   8190,   8191,   8190,   8187,   8180,   8172,   8160,
+    8147,   8130,   8112,   8091,   8067,   8041,   8012,   7982,
+    7948,   7912,   7874,   7834,   7791,   7745,   7697,   7647,
+    7595,   7540,   7483,   7424,   7362,   7299,   7233,   7164,
+    7094,   7021,   6947,   6870,   6791,   6710,   6627,   6542,
+    6455,   6366,   6275,   6182,   6087,   5991,   5892,   5792,
+    5690,   5586,   5481,   5374,   5265,   5155,   5043,   4930,
+    4815,   4698,   4580,   4461,   4341,   4219,   4096,   3971,
+    3845,   3719,   3591,   3462,   3331,   3200,   3068,   2935,
+    2801,   2667,   2531,   2395,   2258,   2120,   1981,   1842,
+    1703,   1563,   1422,   1281,   1140,    998,    856,    713,
+     571,    428,    285,    142,      0,   -142,   -285,   -428,
+    -571,   -713,   -856,   -998,  -1140,  -1281,  -1422,  -1563,
+   -1703,  -1842,  -1981,  -2120,  -2258,  -2395,  -2531,  -2667,
+   -2801,  -2935,  -3068,  -3200,  -3331,  -3462,  -3591,  -3719,
+   -3845,  -3971,  -4095,  -4219,  -4341,  -4461,  -4580,  -4698,
+   -4815,  -4930,  -5043,  -5155,  -5265,  -5374,  -5481,  -5586,
+   -5690,  -5792,  -5892,  -5991,  -6087,  -6182,  -6275,  -6366,
+   -6455,  -6542,  -6627,  -6710,  -6791,  -6870,  -6947,  -7021,
+   -7094,  -7164,  -7233,  -7299,  -7362,  -7424,  -7483,  -7540,
+   -7595,  -7647,  -7697,  -7745,  -7791,  -7834,  -7874,  -7912,
+   -7948,  -7982,  -8012,  -8041,  -8067,  -8091,  -8112,  -8130,
+   -8147,  -8160,  -8172,  -8180,  -8187,  -8190,  -8191,  -8190,
+   -8187,  -8180,  -8172,  -8160,  -8147,  -8130,  -8112,  -8091,
+   -8067,  -8041,  -8012,  -7982,  -7948,  -7912,  -7874,  -7834,
+   -7791,  -7745,  -7697,  -7647,  -7595,  -7540,  -7483,  -7424,
+   -7362,  -7299,  -7233,  -7164,  -7094,  -7021,  -6947,  -6870,
+   -6791,  -6710,  -6627,  -6542,  -6455,  -6366,  -6275,  -6182,
+   -6087,  -5991,  -5892,  -5792,  -5690,  -5586,  -5481,  -5374,
+   -5265,  -5155,  -5043,  -4930,  -4815,  -4698,  -4580,  -4461,
+   -4341,  -4219,  -4096,  -3971,  -3845,  -3719,  -3591,  -3462,
+   -3331,  -3200,  -3068,  -2935,  -2801,  -2667,  -2531,  -2395,
+   -2258,  -2120,  -1981,  -1842,  -1703,  -1563,  -1422,  -1281,
+   -1140,   -998,   -856,   -713,   -571,   -428,   -285,   -142
+};
+
+static const WebRtc_Word16 kNoiseEstQDomain = 15;
+static const WebRtc_Word16 kNoiseEstIncCount = 5;
+
+static void ComfortNoise(AecmCore_t* aecm,
+                         const WebRtc_UWord16* dfa,
+                         complex16_t* out,
+                         const WebRtc_Word16* lambda);
+
+static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm);
+
+// Moves the pointer to the next entry and inserts |far_spectrum| and
+// corresponding Q-domain in its buffer.
+//
+// Inputs:
+//      - self          : Pointer to the delay estimation instance
+//      - far_spectrum  : Pointer to the far end spectrum
+//      - far_q         : Q-domain of far end spectrum
+//
+static void UpdateFarHistory(AecmCore_t* self,
+                             uint16_t* far_spectrum,
+                             int far_q) {
+  // Get new buffer position
+  self->far_history_pos++;
+  if (self->far_history_pos >= MAX_DELAY) {
+    self->far_history_pos = 0;
+  }
+  // Update Q-domain buffer
+  self->far_q_domains[self->far_history_pos] = far_q;
+  // Update far end spectrum buffer
+  memcpy(&(self->far_history[self->far_history_pos * PART_LEN1]),
+         far_spectrum,
+         sizeof(uint16_t) * PART_LEN1);
+}
+
+// Returns a pointer to the far end spectrum aligned to current near end
+// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been
+// called before AlignedFarend(...). Otherwise, you get the pointer to the
+// previous frame. The memory is only valid until the next call of
+// WebRtc_DelayEstimatorProcessFix(...).
+//
+// Inputs:
+//      - self              : Pointer to the AECM instance.
+//      - delay             : Current delay estimate.
+//
+// Output:
+//      - far_q             : The Q-domain of the aligned far end spectrum
+//
+// Return value:
+//      - far_spectrum      : Pointer to the aligned far end spectrum
+//                            NULL - Error
+//
+static const uint16_t* AlignedFarend(AecmCore_t* self, int* far_q, int delay) {
+  int buffer_position = 0;
+  assert(self != NULL);
+  buffer_position = self->far_history_pos - delay;
+
+  // Check buffer position
+  if (buffer_position < 0) {
+    buffer_position += MAX_DELAY;
+  }
+  // Get Q-domain
+  *far_q = self->far_q_domains[buffer_position];
+  // Return far end spectrum
+  return &(self->far_history[buffer_position * PART_LEN1]);
+}
+
+#ifdef ARM_WINM_LOG
+HANDLE logFile = NULL;
+#endif
+
+// Declare function pointers.
+CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+WindowAndFFT WebRtcAecm_WindowAndFFT;
+InverseFFTAndWindow WebRtcAecm_InverseFFTAndWindow;
+
+int WebRtcAecm_CreateCore(AecmCore_t **aecmInst)
+{
+    AecmCore_t *aecm = malloc(sizeof(AecmCore_t));
+    *aecmInst = aecm;
+    if (aecm == NULL)
+    {
+        return -1;
+    }
+
+    if (WebRtc_CreateBuffer(&aecm->farFrameBuf, FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        aecm = NULL;
+        return -1;
+    }
+
+    if (WebRtc_CreateBuffer(&aecm->nearNoisyFrameBuf, FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        aecm = NULL;
+        return -1;
+    }
+
+    if (WebRtc_CreateBuffer(&aecm->nearCleanFrameBuf, FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        aecm = NULL;
+        return -1;
+    }
+
+    if (WebRtc_CreateBuffer(&aecm->outFrameBuf, FRAME_LEN + PART_LEN,
+                            sizeof(int16_t)) == -1)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        aecm = NULL;
+        return -1;
+    }
+
+    if (WebRtc_CreateDelayEstimator(&aecm->delay_estimator,
+                                    PART_LEN1,
+                                    MAX_DELAY,
+                                    0) == -1) {
+      WebRtcAecm_FreeCore(aecm);
+      aecm = NULL;
+      return -1;
+    }
+
+    // Init some aecm pointers. 16 and 32 byte alignment is only necessary
+    // for Neon code currently.
+    aecm->xBuf = (WebRtc_Word16*) (((uintptr_t)aecm->xBuf_buf + 31) & ~ 31);
+    aecm->dBufClean = (WebRtc_Word16*) (((uintptr_t)aecm->dBufClean_buf + 31) & ~ 31);
+    aecm->dBufNoisy = (WebRtc_Word16*) (((uintptr_t)aecm->dBufNoisy_buf + 31) & ~ 31);
+    aecm->outBuf = (WebRtc_Word16*) (((uintptr_t)aecm->outBuf_buf + 15) & ~ 15);
+    aecm->channelStored = (WebRtc_Word16*) (((uintptr_t)
+                                             aecm->channelStored_buf + 15) & ~ 15);
+    aecm->channelAdapt16 = (WebRtc_Word16*) (((uintptr_t)
+                                              aecm->channelAdapt16_buf + 15) & ~ 15);
+    aecm->channelAdapt32 = (WebRtc_Word32*) (((uintptr_t)
+                                              aecm->channelAdapt32_buf + 31) & ~ 31);
+
+    return 0;
+}
+
+void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, const WebRtc_Word16* echo_path)
+{
+    int i = 0;
+
+    // Reset the stored channel
+    memcpy(aecm->channelStored, echo_path, sizeof(WebRtc_Word16) * PART_LEN1);
+    // Reset the adapted channels
+    memcpy(aecm->channelAdapt16, echo_path, sizeof(WebRtc_Word16) * PART_LEN1);
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
+            (WebRtc_Word32)(aecm->channelAdapt16[i]), 16);
+    }
+
+    // Reset channel storing variables
+    aecm->mseAdaptOld = 1000;
+    aecm->mseStoredOld = 1000;
+    aecm->mseThreshold = WEBRTC_SPL_WORD32_MAX;
+    aecm->mseChannelCount = 0;
+}
+
+// Applies the square root Hanning window to one PART_LEN2 sample block of
+// |time_signal| and computes its FFT.
+//
+// Inputs:
+//      - fft                 : Scratch buffer (PART_LEN4 words) used for the
+//                              in-place transform.
+//      - time_signal         : PART_LEN2 time domain samples.
+//      - time_signal_scaling : Left shift applied to each sample before
+//                              windowing, normalizing the Q-domain.
+// Output:
+//      - freq_signal         : The first PART_LEN complex bins, with the
+//                              imaginary parts sign-flipped (conjugated).
+static void WindowAndFFTC(WebRtc_Word16* fft,
+                          const WebRtc_Word16* time_signal,
+                          complex16_t* freq_signal,
+                          int time_signal_scaling)
+{
+    int i, j;
+
+    memset(fft, 0, sizeof(WebRtc_Word16) * PART_LEN4);
+    // FFT of signal
+    for (i = 0, j = 0; i < PART_LEN; i++, j += 2)
+    {
+        // Window time domain signal and insert into real part of
+        // transformation array |fft|. The second half of the block uses the
+        // window table indexed from the far end (mirrored shape).
+        fft[j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(
+            (time_signal[i] << time_signal_scaling),
+            WebRtcAecm_kSqrtHanning[i],
+            14);
+        fft[PART_LEN2 + j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(
+            (time_signal[i + PART_LEN] << time_signal_scaling),
+            WebRtcAecm_kSqrtHanning[PART_LEN - i],
+            14);
+        // Inserting zeros in imaginary parts not necessary since we
+        // initialized the array with all zeros
+    }
+
+    // In-place transform; bit reversal first as required by the FFT routine.
+    WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
+    WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
+
+    // Take only the first PART_LEN bins (PART_LEN2 interleaved words).
+    for (i = 0, j = 0; j < PART_LEN2; i += 1, j += 2)
+    {
+        freq_signal[i].real = fft[j];
+
+        // The imaginary part has to switch sign
+        freq_signal[i].imag = - fft[j+1];
+    }
+}
+
+// Converts the (conjugate-stored) frequency domain signal |efw| back to the
+// time domain, applies the synthesis window, and overlap-adds with the
+// previous block to produce PART_LEN samples in |output|. Also shifts the
+// time domain history buffers by one block. |fft| is PART_LEN4 words of
+// scratch space. |nearendClean| is only tested against NULL here, to decide
+// whether the clean nearend history buffer must be shifted too.
+static void InverseFFTAndWindowC(AecmCore_t* aecm,
+                                 WebRtc_Word16* fft,
+                                 complex16_t* efw,
+                                 WebRtc_Word16* output,
+                                 const WebRtc_Word16* nearendClean)
+{
+    int i, j, outCFFT;
+    WebRtc_Word32 tmp32no1;
+
+    // Synthesis: rebuild the full conjugate-symmetric spectrum from the
+    // PART_LEN1 stored bins (imaginary parts were stored negated).
+    for (i = 1; i < PART_LEN; i++)
+    {
+        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
+        fft[j] = efw[i].real;
+
+        // mirrored data, even
+        fft[PART_LEN4 - j] = efw[i].real;
+        fft[j + 1] = -efw[i].imag;
+
+        //mirrored data, odd
+        fft[PART_LEN4 - (j - 1)] = efw[i].imag;
+    }
+    // DC bin (index 0) and Nyquist bin (index PART_LEN).
+    fft[0] = efw[0].real;
+    fft[1] = -efw[0].imag;
+
+    fft[PART_LEN2] = efw[PART_LEN].real;
+    fft[PART_LEN2 + 1] = -efw[PART_LEN].imag;
+
+    // inverse FFT, result should be scaled with outCFFT
+    WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
+    outCFFT = WebRtcSpl_ComplexIFFT(fft, PART_LEN_SHIFT, 1);
+
+    //take only the real values and scale with outCFFT
+    for (i = 0; i < PART_LEN2; i++)
+    {
+        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
+        fft[i] = fft[j];
+    }
+
+    for (i = 0; i < PART_LEN; i++)
+    {
+        // First half: window, rescale from the IFFT scaling (outCFFT) to the
+        // clean nearend Q-domain, overlap-add the previous block's tail and
+        // saturate to 16 bits.
+        fft[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                fft[i],
+                WebRtcAecm_kSqrtHanning[i],
+                14);
+        tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)fft[i],
+                outCFFT - aecm->dfaCleanQDomain);
+        fft[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+                tmp32no1 + aecm->outBuf[i],
+                WEBRTC_SPL_WORD16_MIN);
+        output[i] = fft[i];
+
+        // Second half: window with the mirrored window shape and keep it in
+        // |outBuf| as the overlap for the next block.
+        tmp32no1 = WEBRTC_SPL_MUL_16_16_RSFT(
+                fft[PART_LEN + i],
+                WebRtcAecm_kSqrtHanning[PART_LEN - i],
+                14);
+        tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1,
+                outCFFT - aecm->dfaCleanQDomain);
+        aecm->outBuf[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(
+                WEBRTC_SPL_WORD16_MAX,
+                tmp32no1,
+                WEBRTC_SPL_WORD16_MIN);
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end (profiling only; |end|, |start|, |freq|, |logFile|,
+    // etc. are presumably declared elsewhere when ARM_WINM_LOG_ is defined)
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+#endif
+
+    // Copy the current block to the old position (aecm->outBuf is shifted elsewhere)
+    memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
+    memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
+    if (nearendClean != NULL)
+    {
+        memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
+    }
+}
+
+// Computes, per frequency bin, the echo estimate from the stored channel
+// (written to |echo_est|) and accumulates three energies into the output
+// pointers: total far end energy, echo energy through the adaptive channel,
+// and echo energy through the stored channel. The energies are ADDED to the
+// values the caller passes in.
+static void CalcLinearEnergiesC(AecmCore_t* aecm,
+                                const WebRtc_UWord16* far_spectrum,
+                                WebRtc_Word32* echo_est,
+                                WebRtc_UWord32* far_energy,
+                                WebRtc_UWord32* echo_energy_adapt,
+                                WebRtc_UWord32* echo_energy_stored)
+{
+    WebRtc_UWord32 far_acc = 0;
+    WebRtc_UWord32 adapt_acc = 0;
+    WebRtc_UWord32 stored_acc = 0;
+    int k;
+
+    // Accumulate the far end energy together with the echo estimates
+    // obtained from the stored and the adaptive channels.
+    for (k = 0; k < PART_LEN1; k++)
+    {
+        echo_est[k] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[k],
+                                            far_spectrum[k]);
+        far_acc += (WebRtc_UWord32)(far_spectrum[k]);
+        adapt_acc += WEBRTC_SPL_UMUL_16_16(aecm->channelAdapt16[k],
+                                           far_spectrum[k]);
+        stored_acc += (WebRtc_UWord32)echo_est[k];
+    }
+
+    *far_energy += far_acc;
+    *echo_energy_adapt += adapt_acc;
+    *echo_energy_stored += stored_acc;
+}
+
+// Copies the current adaptive channel into the stored channel and
+// recalculates the echo estimate |echo_est| for all PART_LEN1 bins from the
+// newly stored channel and the far end spectrum |far_spectrum|.
+static void StoreAdaptiveChannelC(AecmCore_t* aecm,
+                                  const WebRtc_UWord16* far_spectrum,
+                                  WebRtc_Word32* echo_est)
+{
+    int i;
+
+    // During startup we store the channel every block.
+    memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(WebRtc_Word16) * PART_LEN1);
+    // Recalculate echo estimate; loop manually unrolled by four
+    // (PART_LEN is a multiple of four, see the assert in WebRtcAecm_InitCore).
+    for (i = 0; i < PART_LEN; i += 4)
+    {
+        echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                           far_spectrum[i]);
+        echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1],
+                                           far_spectrum[i + 1]);
+        echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2],
+                                           far_spectrum[i + 2]);
+        echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3],
+                                           far_spectrum[i + 3]);
+    }
+    // The last bin (i == PART_LEN here; PART_LEN1 == PART_LEN + 1).
+    echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                       far_spectrum[i]);
+}
+
+// Replaces the adaptive channel with the stored channel, in both the 16-bit
+// and the 32-bit (Q16) representations.
+static void ResetAdaptiveChannelC(AecmCore_t* aecm)
+{
+    int i;
+
+    // The stored channel has a significantly lower MSE than the adaptive one for
+    // two consecutive calculations. Reset the adaptive channel.
+    memcpy(aecm->channelAdapt16, aecm->channelStored,
+           sizeof(WebRtc_Word16) * PART_LEN1);
+    // Restore the W32 channel; loop manually unrolled by four
+    // (PART_LEN is a multiple of four, see the assert in WebRtcAecm_InitCore).
+    for (i = 0; i < PART_LEN; i += 4)
+    {
+        aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
+                (WebRtc_Word32)aecm->channelStored[i], 16);
+        aecm->channelAdapt32[i + 1] = WEBRTC_SPL_LSHIFT_W32(
+                (WebRtc_Word32)aecm->channelStored[i + 1], 16);
+        aecm->channelAdapt32[i + 2] = WEBRTC_SPL_LSHIFT_W32(
+                (WebRtc_Word32)aecm->channelStored[i + 2], 16);
+        aecm->channelAdapt32[i + 3] = WEBRTC_SPL_LSHIFT_W32(
+                (WebRtc_Word32)aecm->channelStored[i + 3], 16);
+    }
+    // The last bin (i == PART_LEN here; PART_LEN1 == PART_LEN + 1).
+    aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
+}
+
+// WebRtcAecm_InitCore(...)
+//
+// This function initializes the AECM instance created with
+// WebRtcAecm_CreateCore(...).
+// Input:
+//      - aecm          : Pointer to the Echo Suppression instance
+//      - samplingFreq  : Sampling Frequency (must be 8000 or 16000 Hz)
+//
+// Output:
+//      - aecm          : Initialized instance
+//
+// Return value         :  0 - Ok
+//                        -1 - Error (unsupported sampling frequency or
+//                             delay estimator init failure)
+//
+int WebRtcAecm_InitCore(AecmCore_t * const aecm, int samplingFreq)
+{
+    int i = 0;
+    WebRtc_Word32 tmp32 = PART_LEN1 * PART_LEN1;
+    WebRtc_Word16 tmp16 = PART_LEN1;
+
+    // Sanity check of the sampling frequency; only 8 and 16 kHz are
+    // supported. (The previous dead store to |samplingFreq| before the
+    // early return has been removed.)
+    if (samplingFreq != 8000 && samplingFreq != 16000)
+    {
+        return -1;
+    }
+    // 1 for 8 kHz, 2 for 16 kHz.
+    aecm->mult = (WebRtc_Word16)(samplingFreq / 8000);
+
+    aecm->farBufWritePos = 0;
+    aecm->farBufReadPos = 0;
+    aecm->knownDelay = 0;
+    aecm->lastKnownDelay = 0;
+
+    WebRtc_InitBuffer(aecm->farFrameBuf);
+    WebRtc_InitBuffer(aecm->nearNoisyFrameBuf);
+    WebRtc_InitBuffer(aecm->nearCleanFrameBuf);
+    WebRtc_InitBuffer(aecm->outFrameBuf);
+
+    memset(aecm->xBuf_buf, 0, sizeof(aecm->xBuf_buf));
+    memset(aecm->dBufClean_buf, 0, sizeof(aecm->dBufClean_buf));
+    memset(aecm->dBufNoisy_buf, 0, sizeof(aecm->dBufNoisy_buf));
+    memset(aecm->outBuf_buf, 0, sizeof(aecm->outBuf_buf));
+
+    aecm->seed = 666;  // Random seed (presumably for comfort noise generation).
+    aecm->totCount = 0;
+
+    if (WebRtc_InitDelayEstimator(aecm->delay_estimator) != 0) {
+      return -1;
+    }
+    // Set far end histories to zero
+    memset(aecm->far_history, 0, sizeof(uint16_t) * PART_LEN1 * MAX_DELAY);
+    memset(aecm->far_q_domains, 0, sizeof(int) * MAX_DELAY);
+    aecm->far_history_pos = MAX_DELAY;
+
+    aecm->nlpFlag = 1;
+    aecm->fixedDelay = -1;
+
+    aecm->dfaCleanQDomain = 0;
+    aecm->dfaCleanQDomainOld = 0;
+    aecm->dfaNoisyQDomain = 0;
+    aecm->dfaNoisyQDomainOld = 0;
+
+    memset(aecm->nearLogEnergy, 0, sizeof(aecm->nearLogEnergy));
+    aecm->farLogEnergy = 0;
+    memset(aecm->echoAdaptLogEnergy, 0, sizeof(aecm->echoAdaptLogEnergy));
+    memset(aecm->echoStoredLogEnergy, 0, sizeof(aecm->echoStoredLogEnergy));
+
+    // Initialize the echo channels with a stored shape.
+    if (samplingFreq == 8000)
+    {
+        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored8kHz);
+    }
+    else
+    {
+        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored16kHz);
+    }
+
+    memset(aecm->echoFilt, 0, sizeof(aecm->echoFilt));
+    memset(aecm->nearFilt, 0, sizeof(aecm->nearFilt));
+    aecm->noiseEstCtr = 0;
+
+    aecm->cngMode = AecmTrue;
+
+    memset(aecm->noiseEstTooLowCtr, 0, sizeof(aecm->noiseEstTooLowCtr));
+    memset(aecm->noiseEstTooHighCtr, 0, sizeof(aecm->noiseEstTooHighCtr));
+    // Shape the initial noise level to an approximate pink noise.
+    for (i = 0; i < (PART_LEN1 >> 1) - 1; i++)
+    {
+        aecm->noiseEst[i] = (tmp32 << 8);
+        tmp16--;
+        tmp32 -= (WebRtc_Word32)((tmp16 << 1) + 1);
+    }
+    for (; i < PART_LEN1; i++)
+    {
+        aecm->noiseEst[i] = (tmp32 << 8);
+    }
+
+    // Extreme initial values make the AsymFilt adopt the first input directly.
+    aecm->farEnergyMin = WEBRTC_SPL_WORD16_MAX;
+    aecm->farEnergyMax = WEBRTC_SPL_WORD16_MIN;
+    aecm->farEnergyMaxMin = 0;
+    aecm->farEnergyVAD = FAR_ENERGY_MIN; // This prevents false speech detection at the
+                                         // beginning.
+    aecm->farEnergyMSE = 0;
+    aecm->currentVADValue = 0;
+    aecm->vadUpdateCount = 0;
+    aecm->firstVAD = 1;
+
+    aecm->startupState = 0;
+    aecm->supGain = SUPGAIN_DEFAULT;
+    aecm->supGainOld = SUPGAIN_DEFAULT;
+
+    aecm->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
+    aecm->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
+    aecm->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
+    aecm->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
+
+    // NOTE(review): presumably required by the optimized (Neon) block
+    // implementations — confirm before changing PART_LEN.
+    assert(PART_LEN % 16 == 0);
+
+    // Initialize function pointers.
+    WebRtcAecm_WindowAndFFT = WindowAndFFTC;
+    WebRtcAecm_InverseFFTAndWindow = InverseFFTAndWindowC;
+    WebRtcAecm_CalcLinearEnergies = CalcLinearEnergiesC;
+    WebRtcAecm_StoreAdaptiveChannel = StoreAdaptiveChannelC;
+    WebRtcAecm_ResetAdaptiveChannel = ResetAdaptiveChannelC;
+
+#ifdef WEBRTC_DETECT_ARM_NEON
+    uint64_t features = WebRtc_GetCPUFeaturesARM();
+    if ((features & kCPUFeatureNEON) != 0)
+    {
+        // Override the C implementations with the Neon optimized versions.
+        WebRtcAecm_InitNeon();
+    }
+#elif defined(WEBRTC_ARCH_ARM_NEON)
+    WebRtcAecm_InitNeon();
+#endif
+
+    return 0;
+}
+
+// TODO(bjornv): This function is currently not used. Add support for these
+// parameters from a higher level
+int WebRtcAecm_Control(AecmCore_t *aecm, int delay, int nlpFlag)
+{
+    // Override the compensation delay and the non-linear processing flag.
+    aecm->fixedDelay = delay;
+    aecm->nlpFlag = nlpFlag;
+
+    return 0;
+}
+
+int WebRtcAecm_FreeCore(AecmCore_t *aecm)
+{
+    // Releasing a NULL instance is treated as an error, mirroring the
+    // create/init calls.
+    if (aecm == NULL)
+    {
+        return -1;
+    }
+
+    // Tear down all owned sub-components before freeing the instance itself.
+    WebRtc_FreeBuffer(aecm->outFrameBuf);
+    WebRtc_FreeBuffer(aecm->nearCleanFrameBuf);
+    WebRtc_FreeBuffer(aecm->nearNoisyFrameBuf);
+    WebRtc_FreeBuffer(aecm->farFrameBuf);
+    WebRtc_FreeDelayEstimator(aecm->delay_estimator);
+
+    free(aecm);
+
+    return 0;
+}
+
+// WebRtcAecm_ProcessFrame(...)
+//
+// Buffers one FRAME_LEN frame of far and near end data and runs the core
+// echo suppression on as many PART_LEN blocks as are available.
+//
+// Inputs:
+//      - aecm          : Handle of the AECM instance.
+//      - farend        : FRAME_LEN samples of farend signal.
+//      - nearendNoisy  : FRAME_LEN samples of (noisy) nearend signal.
+//      - nearendClean  : FRAME_LEN samples of noise suppressed nearend
+//                        signal, or NULL if unavailable.
+// Output:
+//      - out           : FRAME_LEN samples of echo suppressed output.
+//
+// Return value         :  0 - Ok,  -1 - Error.
+//
+int WebRtcAecm_ProcessFrame(AecmCore_t * aecm,
+                            const WebRtc_Word16 * farend,
+                            const WebRtc_Word16 * nearendNoisy,
+                            const WebRtc_Word16 * nearendClean,
+                            WebRtc_Word16 * out)
+{
+    // 8 extra words (16 bytes) of slack so |outBlock| can be aligned to a
+    // 16-byte boundary (the mask below aligns to 16, not 8).
+    WebRtc_Word16 outBlock_buf[PART_LEN + 8];
+    WebRtc_Word16* outBlock = (WebRtc_Word16*) (((uintptr_t) outBlock_buf + 15) & ~ 15);
+
+    WebRtc_Word16 farFrame[FRAME_LEN];
+    const int16_t* out_ptr = NULL;
+    int size = 0;
+
+    // Buffer the current frame.
+    // Fetch an older one corresponding to the delay.
+    WebRtcAecm_BufferFarFrame(aecm, farend, FRAME_LEN);
+    WebRtcAecm_FetchFarFrame(aecm, farFrame, FRAME_LEN, aecm->knownDelay);
+
+    // Buffer the synchronized far and near frames,
+    // to pass the smaller blocks individually.
+    WebRtc_WriteBuffer(aecm->farFrameBuf, farFrame, FRAME_LEN);
+    WebRtc_WriteBuffer(aecm->nearNoisyFrameBuf, nearendNoisy, FRAME_LEN);
+    if (nearendClean != NULL)
+    {
+        WebRtc_WriteBuffer(aecm->nearCleanFrameBuf, nearendClean, FRAME_LEN);
+    }
+
+    // Process as many blocks as possible.
+    while (WebRtc_available_read(aecm->farFrameBuf) >= PART_LEN)
+    {
+        int16_t far_block[PART_LEN];
+        const int16_t* far_block_ptr = NULL;
+        int16_t near_noisy_block[PART_LEN];
+        const int16_t* near_noisy_block_ptr = NULL;
+        int16_t near_clean_block[PART_LEN];
+        const int16_t* near_clean_block_ptr = NULL;  // Stays NULL w/o clean data.
+
+        WebRtc_ReadBuffer(aecm->farFrameBuf, (void**) &far_block_ptr, far_block,
+                          PART_LEN);
+        WebRtc_ReadBuffer(aecm->nearNoisyFrameBuf,
+                          (void**) &near_noisy_block_ptr,
+                          near_noisy_block,
+                          PART_LEN);
+        if (nearendClean != NULL)
+        {
+            WebRtc_ReadBuffer(aecm->nearCleanFrameBuf,
+                              (void**) &near_clean_block_ptr,
+                              near_clean_block,
+                              PART_LEN);
+        }
+
+        // A NULL clean pointer makes ProcessBlock operate on noisy data only,
+        // matching the two previously duplicated call sites.
+        if (WebRtcAecm_ProcessBlock(aecm,
+                                    far_block_ptr,
+                                    near_noisy_block_ptr,
+                                    near_clean_block_ptr,
+                                    outBlock) == -1)
+        {
+            return -1;
+        }
+
+        WebRtc_WriteBuffer(aecm->outFrameBuf, outBlock, PART_LEN);
+    }
+
+    // Stuff the out buffer if we have less than a frame to output.
+    // This should only happen for the first frame.
+    size = (int) WebRtc_available_read(aecm->outFrameBuf);
+    if (size < FRAME_LEN)
+    {
+        WebRtc_MoveReadPtr(aecm->outFrameBuf, size - FRAME_LEN);
+    }
+
+    // Obtain an output frame.
+    WebRtc_ReadBuffer(aecm->outFrameBuf, (void**) &out_ptr, out, FRAME_LEN);
+    if (out_ptr != out) {
+      // ReadBuffer() hasn't copied to |out| in this case.
+      memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t));
+    }
+
+    return 0;
+}
+
+// WebRtcAecm_AsymFilt(...)
+//
+// Performs asymmetric filtering: a first order smoother whose time constant
+// differs depending on the sign of the update.
+//
+// Inputs:
+//      - filtOld       : Previous filtered value.
+//      - inVal         : New input value.
+//      - stepSizePos   : Step size, in number of right shifts, used when the
+//                        input is above the filtered value.
+//      - stepSizeNeg   : Step size, in number of right shifts, used when the
+//                        input is below the filtered value.
+//
+// Return: - Filtered value.
+//
+WebRtc_Word16 WebRtcAecm_AsymFilt(const WebRtc_Word16 filtOld, const WebRtc_Word16 inVal,
+                                  const WebRtc_Word16 stepSizePos,
+                                  const WebRtc_Word16 stepSizeNeg)
+{
+    WebRtc_Word16 retVal;
+
+    // If the filter state is still at an initial extreme value (see
+    // WebRtcAecm_InitCore), adopt the input directly. Logical || replaces
+    // the previous bitwise | — identical result on comparison operands,
+    // but idiomatic and short-circuiting.
+    if ((filtOld == WEBRTC_SPL_WORD16_MAX) || (filtOld == WEBRTC_SPL_WORD16_MIN))
+    {
+        return inVal;
+    }
+    retVal = filtOld;
+    if (filtOld > inVal)
+    {
+        retVal -= WEBRTC_SPL_RSHIFT_W16(filtOld - inVal, stepSizeNeg);
+    } else
+    {
+        retVal += WEBRTC_SPL_RSHIFT_W16(inVal - filtOld, stepSizePos);
+    }
+
+    return retVal;
+}
+
+// LogOfEnergyInQ8(...)
+//
+// Returns log2 of |energy| in Q8, compensated for its Q-domain |q_domain|.
+// For zero energy a fixed low floor value is returned. Extracted because the
+// identical computation previously appeared four times below.
+static WebRtc_Word16 LogOfEnergyInQ8(WebRtc_UWord32 energy, int q_domain)
+{
+    // Floor value used when the energy is zero.
+    WebRtc_Word16 log_energy_q8 = WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7);
+    if (energy > 0)
+    {
+        WebRtc_Word16 zeros = WebRtcSpl_NormU32(energy);
+        WebRtc_Word16 frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(
+            (WEBRTC_SPL_LSHIFT_U32(energy, zeros) & 0x7FFFFFFF), 23);
+        // log2 in Q8: integer part from the leading bit position plus a
+        // linear approximation of the fractional part.
+        log_energy_q8 += WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
+        log_energy_q8 -= WEBRTC_SPL_LSHIFT_W16(q_domain, 8);
+    }
+    return log_energy_q8;
+}
+
+// WebRtcAecm_CalcEnergies(...)
+//
+// This function calculates the log of energies for nearend, farend and
+// estimated echoes. There is also an update of energy decision levels,
+// i.e. internal VAD.
+//
+// @param  aecm         [i/o]   Handle of the AECM instance.
+// @param  far_spectrum [in]    Pointer to farend spectrum.
+// @param  far_q        [in]    Q-domain of farend spectrum.
+// @param  nearEner     [in]    Near end energy for current block in
+//                              Q(aecm->dfaQDomain).
+// @param  echoEst      [out]   Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_CalcEnergies(AecmCore_t * aecm,
+                             const WebRtc_UWord16* far_spectrum,
+                             const WebRtc_Word16 far_q,
+                             const WebRtc_UWord32 nearEner,
+                             WebRtc_Word32 * echoEst)
+{
+    // Local variables
+    WebRtc_UWord32 tmpAdapt = 0;
+    WebRtc_UWord32 tmpStored = 0;
+    WebRtc_UWord32 tmpFar = 0;
+
+    int i;
+
+    WebRtc_Word16 tmp16;
+    WebRtc_Word16 increase_max_shifts = 4;
+    WebRtc_Word16 decrease_max_shifts = 11;
+    WebRtc_Word16 increase_min_shifts = 11;
+    WebRtc_Word16 decrease_min_shifts = 3;
+
+    // Get log of near end energy and store in buffer.
+
+    // Shift buffer
+    memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy,
+            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
+
+    // Logarithm of integrated magnitude spectrum (nearEner)
+    aecm->nearLogEnergy[0] = LogOfEnergyInQ8(nearEner, aecm->dfaNoisyQDomain);
+
+    WebRtcAecm_CalcLinearEnergies(aecm, far_spectrum, echoEst, &tmpFar, &tmpAdapt, &tmpStored);
+
+    // Shift buffers
+    memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy,
+            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
+    memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy,
+            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
+
+    // Logarithm of delayed far end energy
+    aecm->farLogEnergy = LogOfEnergyInQ8(tmpFar, far_q);
+
+    // Logarithm of estimated echo energy through adapted channel
+    aecm->echoAdaptLogEnergy[0] = LogOfEnergyInQ8(tmpAdapt,
+                                                  RESOLUTION_CHANNEL16 + far_q);
+
+    // Logarithm of estimated echo energy through stored channel
+    aecm->echoStoredLogEnergy[0] = LogOfEnergyInQ8(tmpStored,
+                                                   RESOLUTION_CHANNEL16 + far_q);
+
+    // Update farend energy levels (min, max, vad, mse)
+    if (aecm->farLogEnergy > FAR_ENERGY_MIN)
+    {
+        if (aecm->startupState == 0)
+        {
+            // Track faster during startup.
+            increase_max_shifts = 2;
+            decrease_min_shifts = 2;
+            increase_min_shifts = 8;
+        }
+
+        aecm->farEnergyMin = WebRtcAecm_AsymFilt(aecm->farEnergyMin, aecm->farLogEnergy,
+                                                 increase_min_shifts, decrease_min_shifts);
+        aecm->farEnergyMax = WebRtcAecm_AsymFilt(aecm->farEnergyMax, aecm->farLogEnergy,
+                                                 increase_max_shifts, decrease_max_shifts);
+        aecm->farEnergyMaxMin = (aecm->farEnergyMax - aecm->farEnergyMin);
+
+        // Dynamic VAD region size
+        tmp16 = 2560 - aecm->farEnergyMin;
+        if (tmp16 > 0)
+        {
+            tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, FAR_ENERGY_VAD_REGION, 9);
+        } else
+        {
+            tmp16 = 0;
+        }
+        tmp16 += FAR_ENERGY_VAD_REGION;
+
+        // Logical || replaces bitwise | on boolean operands (same result).
+        if ((aecm->startupState == 0) || (aecm->vadUpdateCount > 1024))
+        {
+            // In startup phase or VAD update halted
+            aecm->farEnergyVAD = aecm->farEnergyMin + tmp16;
+        } else
+        {
+            if (aecm->farEnergyVAD > aecm->farLogEnergy)
+            {
+                // Move the VAD threshold slowly towards the current level.
+                aecm->farEnergyVAD += WEBRTC_SPL_RSHIFT_W16(aecm->farLogEnergy +
+                                                            tmp16 -
+                                                            aecm->farEnergyVAD,
+                                                            6);
+                aecm->vadUpdateCount = 0;
+            } else
+            {
+                aecm->vadUpdateCount++;
+            }
+        }
+        // Put MSE threshold higher than VAD
+        aecm->farEnergyMSE = aecm->farEnergyVAD + (1 << 8);
+    }
+
+    // Update VAD variables
+    if (aecm->farLogEnergy > aecm->farEnergyVAD)
+    {
+        if ((aecm->startupState == 0) || (aecm->farEnergyMaxMin > FAR_ENERGY_DIFF))
+        {
+            // We are in startup or have significant dynamics in input speech level
+            aecm->currentVADValue = 1;
+        }
+    } else
+    {
+        aecm->currentVADValue = 0;
+    }
+    if ((aecm->currentVADValue) && (aecm->firstVAD))
+    {
+        aecm->firstVAD = 0;
+        if (aecm->echoAdaptLogEnergy[0] > aecm->nearLogEnergy[0])
+        {
+            // The estimated echo has higher energy than the near end signal.
+            // This means that the initialization was too aggressive. Scale
+            // down by a factor 8
+            for (i = 0; i < PART_LEN1; i++)
+            {
+                aecm->channelAdapt16[i] >>= 3;
+            }
+            // Compensate the adapted echo energy level accordingly.
+            aecm->echoAdaptLogEnergy[0] -= (3 << 8);
+            // Re-arm so the check runs again on the next VAD hit.
+            aecm->firstVAD = 1;
+        }
+    }
+}
+
+// WebRtcAecm_CalcStepSize(...)
+//
+// Calculates the step size used in the NLMS channel estimation.
+//
+// @param  aecm  [in]    Handle of the AECM instance.
+// @return               Step size in log2(), i.e. number of shifts.
+//
+WebRtc_Word16 WebRtcAecm_CalcStepSize(AecmCore_t * const aecm)
+{
+    WebRtc_Word16 mu;
+
+    if (!aecm->currentVADValue)
+    {
+        // Far end energy level too low: freeze the channel (zero step size).
+        return 0;
+    }
+    if (aecm->startupState <= 0)
+    {
+        // Still in startup: always use the maximum step size.
+        return MU_MAX;
+    }
+
+    if (aecm->farEnergyMin >= aecm->farEnergyMax)
+    {
+        mu = MU_MIN;
+    } else
+    {
+        // Interpolate between MU_MIN and MU_MAX based on where the current
+        // far end level sits within the tracked [min, max] range.
+        WebRtc_Word32 scaled = WEBRTC_SPL_MUL_16_16(
+            (WebRtc_Word16)(aecm->farLogEnergy - aecm->farEnergyMin), MU_DIFF);
+        scaled = WebRtcSpl_DivW32W16(scaled, aecm->farEnergyMaxMin);
+        // The -1 is an alternative to rounding. This way we get a larger
+        // stepsize, so we in some sense compensate for truncation in NLMS
+        mu = MU_MIN - 1 - (WebRtc_Word16)(scaled);
+    }
+    if (mu < MU_MAX)
+    {
+        mu = MU_MAX; // Equivalent with maximum step size of 2^-MU_MAX
+    }
+
+    return mu;
+}
+
+// WebRtcAecm_UpdateChannel(...)
+//
+// This function performs channel estimation. NLMS and decision on channel storage.
+//
+//
+// @param  aecm         [i/o]   Handle of the AECM instance.
+// @param  far_spectrum [in]    Absolute value of the farend signal in Q(far_q)
+// @param  far_q        [in]    Q-domain of the farend signal
+// @param  dfa          [in]    Absolute value of the nearend signal (Q[aecm->dfaQDomain])
+// @param  mu           [in]    NLMS step size.
+// @param  echoEst      [i/o]   Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_UpdateChannel(AecmCore_t * aecm,
+                              const WebRtc_UWord16* far_spectrum,
+                              const WebRtc_Word16 far_q,
+                              const WebRtc_UWord16 * const dfa,
+                              const WebRtc_Word16 mu,
+                              WebRtc_Word32 * echoEst)
+{
+
+    WebRtc_UWord32 tmpU32no1, tmpU32no2;
+    WebRtc_Word32 tmp32no1, tmp32no2;
+    WebRtc_Word32 mseStored;
+    WebRtc_Word32 mseAdapt;
+
+    int i;
+
+    WebRtc_Word16 zerosFar, zerosNum, zerosCh, zerosDfa;
+    WebRtc_Word16 shiftChFar, shiftNum, shift2ResChan;
+    WebRtc_Word16 tmp16no1;
+    WebRtc_Word16 xfaQ, dfaQ;
+
+    // This is the channel estimation algorithm. It is base on NLMS but has a variable step
+    // length, which was calculated above.
+    if (mu)
+    {
+        for (i = 0; i < PART_LEN1; i++)
+        {
+            // Determine norm of channel and farend to make sure we don't get overflow in
+            // multiplication
+            zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]);
+            zerosFar = WebRtcSpl_NormU32((WebRtc_UWord32)far_spectrum[i]);
+            if (zerosCh + zerosFar > 31)
+            {
+                // Multiplication is safe
+                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(aecm->channelAdapt32[i],
+                        far_spectrum[i]);
+                shiftChFar = 0;
+            } else
+            {
+                // We need to shift down before multiplication
+                shiftChFar = 32 - zerosCh - zerosFar;
+                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(
+                    WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], shiftChFar),
+                    far_spectrum[i]);
+            }
+            // Determine Q-domain of numerator
+            zerosNum = WebRtcSpl_NormU32(tmpU32no1);
+            if (dfa[i])
+            {
+                zerosDfa = WebRtcSpl_NormU32((WebRtc_UWord32)dfa[i]);
+            } else
+            {
+                zerosDfa = 32;
+            }
+            tmp16no1 = zerosDfa - 2 + aecm->dfaNoisyQDomain -
+                RESOLUTION_CHANNEL32 - far_q + shiftChFar;
+            if (zerosNum > tmp16no1 + 1)
+            {
+                xfaQ = tmp16no1;
+                dfaQ = zerosDfa - 2;
+            } else
+            {
+                xfaQ = zerosNum - 2;
+                dfaQ = RESOLUTION_CHANNEL32 + far_q - aecm->dfaNoisyQDomain -
+                    shiftChFar + xfaQ;
+            }
+            // Add in the same Q-domain
+            tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ);
+            tmpU32no2 = WEBRTC_SPL_SHIFT_W32((WebRtc_UWord32)dfa[i], dfaQ);
+            tmp32no1 = (WebRtc_Word32)tmpU32no2 - (WebRtc_Word32)tmpU32no1;
+            zerosNum = WebRtcSpl_NormW32(tmp32no1);
+            if ((tmp32no1) && (far_spectrum[i] > (CHANNEL_VAD << far_q)))
+            {
+                //
+                // Update is needed
+                //
+                // This is what we would like to compute
+                //
+                // tmp32no1 = dfa[i] - (aecm->channelAdapt[i] * far_spectrum[i])
+                // tmp32norm = (i + 1)
+                // aecm->channelAdapt[i] += (2^mu) * tmp32no1
+                //                        / (tmp32norm * far_spectrum[i])
+                //
+
+                // Make sure we don't get overflow in multiplication.
+                if (zerosNum + zerosFar > 31)
+                {
+                    if (tmp32no1 > 0)
+                    {
+                        tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmp32no1,
+                                                                        far_spectrum[i]);
+                    } else
+                    {
+                        tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(-tmp32no1,
+                                                                         far_spectrum[i]);
+                    }
+                    shiftNum = 0;
+                } else
+                {
+                    shiftNum = 32 - (zerosNum + zerosFar);
+                    if (tmp32no1 > 0)
+                    {
+                        tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(
+                                WEBRTC_SPL_RSHIFT_W32(tmp32no1, shiftNum),
+                                far_spectrum[i]);
+                    } else
+                    {
+                        tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(
+                                WEBRTC_SPL_RSHIFT_W32(-tmp32no1, shiftNum),
+                                far_spectrum[i]);
+                    }
+                }
+                // Normalize with respect to frequency bin
+                tmp32no2 = WebRtcSpl_DivW32W16(tmp32no2, i + 1);
+                // Make sure we are in the right Q-domain
+                shift2ResChan = shiftNum + shiftChFar - xfaQ - mu - ((30 - zerosFar) << 1);
+                if (WebRtcSpl_NormW32(tmp32no2) < shift2ResChan)
+                {
+                    tmp32no2 = WEBRTC_SPL_WORD32_MAX;
+                } else
+                {
+                    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, shift2ResChan);
+                }
+                aecm->channelAdapt32[i] = WEBRTC_SPL_ADD_SAT_W32(aecm->channelAdapt32[i],
+                        tmp32no2);
+                if (aecm->channelAdapt32[i] < 0)
+                {
+                    // We can never have negative channel gain
+                    aecm->channelAdapt32[i] = 0;
+                }
+                aecm->channelAdapt16[i]
+                        = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], 16);
+            }
+        }
+    }
+    // END: Adaptive channel update
+
+    // Determine if we should store or restore the channel
+    if ((aecm->startupState == 0) & (aecm->currentVADValue))
+    {
+        // During startup we store the channel every block,
+        // and we recalculate echo estimate
+        WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+    } else
+    {
+        if (aecm->farLogEnergy < aecm->farEnergyMSE)
+        {
+            aecm->mseChannelCount = 0;
+        } else
+        {
+            aecm->mseChannelCount++;
+        }
+        // Enough data for validation. Store channel if we can.
+        if (aecm->mseChannelCount >= (MIN_MSE_COUNT + 10))
+        {
+            // We have enough data.
+            // Calculate MSE of "Adapt" and "Stored" versions.
+            // It is actually not MSE, but average absolute error.
+            mseStored = 0;
+            mseAdapt = 0;
+            for (i = 0; i < MIN_MSE_COUNT; i++)
+            {
+                tmp32no1 = ((WebRtc_Word32)aecm->echoStoredLogEnergy[i]
+                        - (WebRtc_Word32)aecm->nearLogEnergy[i]);
+                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+                mseStored += tmp32no2;
+
+                tmp32no1 = ((WebRtc_Word32)aecm->echoAdaptLogEnergy[i]
+                        - (WebRtc_Word32)aecm->nearLogEnergy[i]);
+                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+                mseAdapt += tmp32no2;
+            }
+            if (((mseStored << MSE_RESOLUTION) < (MIN_MSE_DIFF * mseAdapt))
+                    & ((aecm->mseStoredOld << MSE_RESOLUTION) < (MIN_MSE_DIFF
+                            * aecm->mseAdaptOld)))
+            {
+                // The stored channel has a significantly lower MSE than the adaptive one for
+                // two consecutive calculations. Reset the adaptive channel.
+                WebRtcAecm_ResetAdaptiveChannel(aecm);
+            } else if (((MIN_MSE_DIFF * mseStored) > (mseAdapt << MSE_RESOLUTION)) & (mseAdapt
+                    < aecm->mseThreshold) & (aecm->mseAdaptOld < aecm->mseThreshold))
+            {
+                // The adaptive channel has a significantly lower MSE than the stored one.
+                // The MSE for the adaptive channel has also been low for two consecutive
+                // calculations. Store the adaptive channel.
+                WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+
+                // Update threshold
+                if (aecm->mseThreshold == WEBRTC_SPL_WORD32_MAX)
+                {
+                    aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
+                } else
+                {
+                    aecm->mseThreshold += WEBRTC_SPL_MUL_16_16_RSFT(mseAdapt
+                            - WEBRTC_SPL_MUL_16_16_RSFT(aecm->mseThreshold, 5, 3), 205, 8);
+                }
+
+            }
+
+            // Reset counter
+            aecm->mseChannelCount = 0;
+
+            // Store the MSE values.
+            aecm->mseStoredOld = mseStored;
+            aecm->mseAdaptOld = mseAdapt;
+        }
+    }
+    // END: Determine if we should store or reset channel estimate.
+}
+
+// CalcSuppressionGain(...)
+//
+// This function calculates the suppression gain that is used in the Wiener filter.
+//
+//
+// @param  aecm     [in]    Handle of the AECM instance.
+// @param  supGain  [out]   (Return value) Suppression gain with which to scale the noise
+//                          level (Q14).
+//
+//
+static WebRtc_Word16 CalcSuppressionGain(AecmCore_t * const aecm)
+{
+    WebRtc_Word32 tmp32no1;
+
+    WebRtc_Word16 supGain = SUPGAIN_DEFAULT;
+    WebRtc_Word16 tmp16no1;
+    WebRtc_Word16 dE = 0;
+
+    // Determine suppression gain used in the Wiener filter. The gain is based on a mix of far
+    // end energy and echo estimation error.
+    // Adjust for the far end signal level. A low signal level indicates no far end signal,
+    // hence we set the suppression gain to 0
+    if (!aecm->currentVADValue)
+    {
+        supGain = 0;
+    } else
+    {
+        // Adjust for possible double talk. If we have large variations in estimation error we
+        // likely have double talk (or poor channel).
+        // dE = |near energy - stored echo energy - offset|, in log domain.
+        tmp16no1 = (aecm->nearLogEnergy[0] - aecm->echoStoredLogEnergy[0] - ENERGY_DEV_OFFSET);
+        dE = WEBRTC_SPL_ABS_W16(tmp16no1);
+
+        if (dE < ENERGY_DEV_TOL)
+        {
+            // Likely no double talk. The better estimation, the more we can suppress signal.
+            // Update counters
+            if (dE < SUPGAIN_EPC_DT)
+            {
+                // Interpolate the gain over [0, SUPGAIN_EPC_DT); the added half
+                // divisor term rounds the fixed-point division.
+                tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffAB, dE);
+                tmp32no1 += (SUPGAIN_EPC_DT >> 1);
+                tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT);
+                supGain = aecm->supGainErrParamA - tmp16no1;
+            } else
+            {
+                // Interpolate the gain over [SUPGAIN_EPC_DT, ENERGY_DEV_TOL),
+                // again with rounded fixed-point division.
+                tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffBD,
+                                                (ENERGY_DEV_TOL - dE));
+                tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1);
+                tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL
+                        - SUPGAIN_EPC_DT));
+                supGain = aecm->supGainErrParamD + tmp16no1;
+            }
+        } else
+        {
+            // Likely in double talk. Use default value
+            supGain = aecm->supGainErrParamD;
+        }
+    }
+
+    // Smooth aecm->supGain towards max(supGain, supGainOld) in 1/16 steps.
+    if (supGain > aecm->supGainOld)
+    {
+        tmp16no1 = supGain;
+    } else
+    {
+        tmp16no1 = aecm->supGainOld;
+    }
+    aecm->supGainOld = supGain;
+    // NOTE(review): both branches below are byte-identical, so the condition
+    // has no effect; kept as-is to match upstream WebRTC.
+    if (tmp16no1 < aecm->supGain)
+    {
+        aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4);
+    } else
+    {
+        aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4);
+    }
+
+    // END: Update suppression gain
+
+    return aecm->supGain;
+}
+
+// Transforms a time domain signal into the frequency domain, outputting the
+// complex valued signal, absolute value and sum of absolute values.
+//
+// time_signal          [in]    Pointer to time domain signal
+// freq_signal_real     [out]   Pointer to real part of frequency domain array
+// freq_signal_imag     [out]   Pointer to imaginary part of frequency domain
+//                              array
+// freq_signal_abs      [out]   Pointer to absolute value of frequency domain
+//                              array
+// freq_signal_sum_abs  [out]   Pointer to the sum of all absolute values in
+//                              the frequency domain array
+// return value                 The Q-domain of current frequency values
+//
+static int TimeToFrequencyDomain(const WebRtc_Word16* time_signal,
+                                 complex16_t* freq_signal,
+                                 WebRtc_UWord16* freq_signal_abs,
+                                 WebRtc_UWord32* freq_signal_sum_abs)
+{
+    int i = 0;
+    int time_signal_scaling = 0;
+
+    WebRtc_Word32 tmp32no1;
+    WebRtc_Word32 tmp32no2;
+
+    // In fft_buf, +16 for 32-byte alignment.
+    WebRtc_Word16 fft_buf[PART_LEN4 + 16];
+    WebRtc_Word16 *fft = (WebRtc_Word16 *) (((uintptr_t) fft_buf + 31) & ~31);
+
+    WebRtc_Word16 tmp16no1;
+    WebRtc_Word16 tmp16no2;
+#ifdef AECM_WITH_ABS_APPROX
+    WebRtc_Word16 max_value = 0;
+    WebRtc_Word16 min_value = 0;
+    WebRtc_UWord16 alpha = 0;
+    WebRtc_UWord16 beta = 0;
+#endif
+
+#ifdef AECM_DYNAMIC_Q
+    // Scale up the input as far as possible without overflow; the number of
+    // head-room bits becomes the returned Q-domain.
+    tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2);
+    time_signal_scaling = WebRtcSpl_NormW16(tmp16no1);
+#endif
+
+    WebRtcAecm_WindowAndFFT(fft, time_signal, freq_signal, time_signal_scaling);
+
+    // Extract imaginary and real part, calculate the magnitude for all frequency bins
+    // Bins 0 (DC) and PART_LEN (Nyquist) are purely real.
+    freq_signal[0].imag = 0;
+    freq_signal[PART_LEN].imag = 0;
+    freq_signal[PART_LEN].real = fft[PART_LEN2];
+    freq_signal_abs[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(
+        freq_signal[0].real);
+    freq_signal_abs[PART_LEN] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(
+        freq_signal[PART_LEN].real);
+    (*freq_signal_sum_abs) = (WebRtc_UWord32)(freq_signal_abs[0]) +
+        (WebRtc_UWord32)(freq_signal_abs[PART_LEN]);
+
+    for (i = 1; i < PART_LEN; i++)
+    {
+        if (freq_signal[i].real == 0)
+        {
+            freq_signal_abs[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(
+                freq_signal[i].imag);
+        }
+        else if (freq_signal[i].imag == 0)
+        {
+            freq_signal_abs[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(
+                freq_signal[i].real);
+        }
+        else
+        {
+            // Approximation for magnitude of complex fft output
+            // magn = sqrt(real^2 + imag^2)
+            // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
+            //
+            // The parameters alpha and beta are stored in Q15
+
+#ifdef AECM_WITH_ABS_APPROX
+            tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+            tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+
+            if(tmp16no1 > tmp16no2)
+            {
+                max_value = tmp16no1;
+                min_value = tmp16no2;
+            } else
+            {
+                max_value = tmp16no2;
+                min_value = tmp16no1;
+            }
+
+            // Magnitude in Q(-6)
+            // Pick (alpha, beta) by the ratio max/min of the two components.
+            if ((max_value >> 2) > min_value)
+            {
+                alpha = kAlpha1;
+                beta = kBeta1;
+            } else if ((max_value >> 1) > min_value)
+            {
+                alpha = kAlpha2;
+                beta = kBeta2;
+            } else
+            {
+                alpha = kAlpha3;
+                beta = kBeta3;
+            }
+            tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(max_value,
+                                                                alpha,
+                                                                15);
+            tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(min_value,
+                                                                beta,
+                                                                15);
+            freq_signal_abs[i] = (WebRtc_UWord16)tmp16no1 +
+                (WebRtc_UWord16)tmp16no2;
+#else
+#ifdef WEBRTC_ARCH_ARM_V7A
+           __asm__("smulbb %0, %1, %2" : "=r"(tmp32no1) : "r"(freq_signal[i].real),
+                                                "r"(freq_signal[i].real));
+           // NOTE(review): the "::" below declares no output operands, so
+           // tmp32no2 is passed as an *input* and the smlabb result is never
+           // written back — tmp32no2 stays uninitialized on this path. The
+           // constraint should be "=r"(tmp32no2) with tmp32no1 as input;
+           // confirm against later upstream revisions before relying on the
+           // ARMv7 build.
+           __asm__("smlabb %0, %1, %2, %3" :: "r"(tmp32no2), "r"(freq_signal[i].imag), 
+                                                "r"(freq_signal[i].imag), "r"(tmp32no1));
+#else
+            tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+            tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+            tmp32no1 = WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1);
+            tmp32no2 = WEBRTC_SPL_MUL_16_16(tmp16no2, tmp16no2);
+            tmp32no2 = WEBRTC_SPL_ADD_SAT_W32(tmp32no1, tmp32no2);
+#endif // WEBRTC_ARCH_ARM_V7A
+            tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2);
+
+            freq_signal_abs[i] = (WebRtc_UWord16)tmp32no1;
+#endif // AECM_WITH_ABS_APPROX
+        }
+        (*freq_signal_sum_abs) += (WebRtc_UWord32)freq_signal_abs[i];
+    }
+
+    return time_signal_scaling;
+}
+
+// WebRtcAecm_ProcessBlock(...)
+//
+// Processes one PART_LEN-sample block: buffers the far/near signals,
+// transforms them to the frequency domain, estimates the delay and the
+// echo channel, computes a Wiener suppression filter (hnl), optionally
+// applies NLP and comfort noise, and synthesizes the echo-suppressed
+// output via the inverse FFT.
+//
+// @param  aecm          [i/o]  Handle of the AECM instance.
+// @param  farend        [in]   Far-end signal block.
+// @param  nearendNoisy  [in]   Noisy near-end signal block.
+// @param  nearendClean  [in]   Clean near-end block, or NULL if unavailable.
+// @param  output        [out]  Echo-suppressed output block.
+// @return 0 on success, -1 on failure (delay estimator error or no
+//         aligned far-end spectrum available).
+//
+int WebRtcAecm_ProcessBlock(AecmCore_t * aecm,
+                            const WebRtc_Word16 * farend,
+                            const WebRtc_Word16 * nearendNoisy,
+                            const WebRtc_Word16 * nearendClean,
+                            WebRtc_Word16 * output)
+{
+    int i;
+
+    WebRtc_UWord32 xfaSum;
+    WebRtc_UWord32 dfaNoisySum;
+    WebRtc_UWord32 dfaCleanSum;
+    WebRtc_UWord32 echoEst32Gained;
+    WebRtc_UWord32 tmpU32;
+
+    WebRtc_Word32 tmp32no1;
+
+    WebRtc_UWord16 xfa[PART_LEN1];
+    WebRtc_UWord16 dfaNoisy[PART_LEN1];
+    WebRtc_UWord16 dfaClean[PART_LEN1];
+    WebRtc_UWord16* ptrDfaClean = dfaClean;
+    const WebRtc_UWord16* far_spectrum_ptr = NULL;
+
+    // 32 byte aligned buffers (with +8 or +16).
+    // TODO (kma): define fft with complex16_t.
+    WebRtc_Word16 fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe.
+    WebRtc_Word32 echoEst32_buf[PART_LEN1 + 8];
+    WebRtc_Word32 dfw_buf[PART_LEN1 + 8];
+    WebRtc_Word32 efw_buf[PART_LEN1 + 8];
+
+    WebRtc_Word16* fft = (WebRtc_Word16*) (((uintptr_t) fft_buf + 31) & ~ 31);
+    WebRtc_Word32* echoEst32 = (WebRtc_Word32*) (((uintptr_t) echoEst32_buf + 31) & ~ 31);
+    complex16_t* dfw = (complex16_t*) (((uintptr_t) dfw_buf + 31) & ~ 31);
+    complex16_t* efw = (complex16_t*) (((uintptr_t) efw_buf + 31) & ~ 31);
+
+    WebRtc_Word16 hnl[PART_LEN1];
+    WebRtc_Word16 numPosCoef = 0;
+    WebRtc_Word16 nlpGain = ONE_Q14;
+    int delay;
+    WebRtc_Word16 tmp16no1;
+    WebRtc_Word16 tmp16no2;
+    WebRtc_Word16 mu;
+    WebRtc_Word16 supGain;
+    WebRtc_Word16 zeros32, zeros16;
+    WebRtc_Word16 zerosDBufNoisy, zerosDBufClean, zerosXBuf;
+    int far_q;
+    WebRtc_Word16 resolutionDiff, qDomainDiff;
+
+    const int kMinPrefBand = 4;
+    const int kMaxPrefBand = 24;
+    WebRtc_Word32 avgHnl32 = 0;
+
+#ifdef ARM_WINM_LOG_
+    DWORD temp;
+    static int flag0 = 0;
+    __int64 freq, start, end, diff__;
+    unsigned int milliseconds;
+#endif
+
+    // Determine startup state. There are three states:
+    // (0) the first CONV_LEN blocks
+    // (1) another CONV_LEN blocks
+    // (2) the rest
+
+    if (aecm->startupState < 2)
+    {
+        aecm->startupState = (aecm->totCount >= CONV_LEN) + (aecm->totCount >= CONV_LEN2);
+    }
+    // END: Determine startup state
+
+    // Buffer near and far end signals
+    memcpy(aecm->xBuf + PART_LEN, farend, sizeof(WebRtc_Word16) * PART_LEN);
+    memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(WebRtc_Word16) * PART_LEN);
+    if (nearendClean != NULL)
+    {
+        memcpy(aecm->dBufClean + PART_LEN, nearendClean, sizeof(WebRtc_Word16) * PART_LEN);
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick start
+    QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+
+    // Transform far end signal from time domain to frequency domain.
+    far_q = TimeToFrequencyDomain(aecm->xBuf,
+                                  dfw,
+                                  xfa,
+                                  &xfaSum);
+
+    // Transform noisy near end signal from time domain to frequency domain.
+    // Note: dfw is reused as scratch; only the magnitudes (dfaNoisy) and the
+    // last transform's complex output are needed later.
+    zerosDBufNoisy = TimeToFrequencyDomain(aecm->dBufNoisy,
+                                           dfw,
+                                           dfaNoisy,
+                                           &dfaNoisySum);
+    aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
+    aecm->dfaNoisyQDomain = (WebRtc_Word16)zerosDBufNoisy;
+
+
+    if (nearendClean == NULL)
+    {
+        ptrDfaClean = dfaNoisy;
+        aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
+        aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
+        dfaCleanSum = dfaNoisySum;
+    } else
+    {
+        // Transform clean near end signal from time domain to frequency domain.
+        zerosDBufClean = TimeToFrequencyDomain(aecm->dBufClean,
+                                               dfw,
+                                               dfaClean,
+                                               &dfaCleanSum);
+        aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
+        aecm->dfaCleanQDomain = (WebRtc_Word16)zerosDBufClean;
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+    // measure tick start
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+
+    // Get the delay
+    // Save far-end history and estimate delay
+    UpdateFarHistory(aecm, xfa, far_q);
+    delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator,
+                                            xfa,
+                                            dfaNoisy,
+                                            PART_LEN1,
+                                            far_q,
+                                            zerosDBufNoisy);
+    if (delay == -1)
+    {
+        // Delay estimator error.
+        return -1;
+    }
+    else if (delay == -2)
+    {
+        // If the delay is unknown, we assume zero.
+        // NOTE: this will have to be adjusted if we ever add lookahead.
+        delay = 0;
+    }
+
+    if (aecm->fixedDelay >= 0)
+    {
+        // Use fixed delay
+        delay = aecm->fixedDelay;
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+    // measure tick start
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+    // Get aligned far end spectrum
+    far_spectrum_ptr = AlignedFarend(aecm, &far_q, delay);
+    zerosXBuf = (WebRtc_Word16) far_q;
+    if (far_spectrum_ptr == NULL)
+    {
+        return -1;
+    }
+
+    // Calculate log(energy) and update energy threshold levels
+    WebRtcAecm_CalcEnergies(aecm,
+                            far_spectrum_ptr,
+                            zerosXBuf,
+                            dfaNoisySum,
+                            echoEst32);
+
+    // Calculate stepsize
+    mu = WebRtcAecm_CalcStepSize(aecm);
+
+    // Update counters
+    aecm->totCount++;
+
+    // This is the channel estimation algorithm.
+    // It is base on NLMS but has a variable step length, which was calculated above.
+    WebRtcAecm_UpdateChannel(aecm, far_spectrum_ptr, zerosXBuf, dfaNoisy, mu, echoEst32);
+    supGain = CalcSuppressionGain(aecm);
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+    // measure tick start
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+
+    // Calculate Wiener filter hnl[]
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        // Far end signal through channel estimate in Q8
+        // How much can we shift right to preserve resolution
+        // Smooth the echo estimate with factor 50/256.
+        tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
+        aecm->echoFilt[i] += WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32no1, 50), 8);
+
+        zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
+        zeros16 = WebRtcSpl_NormW16(supGain) + 1;
+        if (zeros32 + zeros16 > 16)
+        {
+            // Multiplication is safe
+            // Result in Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+aecm->xfaQDomainBuf[diff])
+            echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i],
+                                                    (WebRtc_UWord16)supGain);
+            resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+            resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+        } else
+        {
+            // Not enough head room; shift one of the factors down first.
+            tmp16no1 = 17 - zeros32 - zeros16;
+            resolutionDiff = 14 + tmp16no1 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+            resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+            if (zeros32 > tmp16no1)
+            {
+                echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i],
+                        (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W16(supGain,
+                                tmp16no1)); // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+            } else
+            {
+                // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+                echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
+                        (WebRtc_UWord32)WEBRTC_SPL_RSHIFT_W32(aecm->echoFilt[i], tmp16no1),
+                        (WebRtc_UWord16)supGain);
+            }
+        }
+
+        zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
+        // NOTE(review): bitwise '&' of two comparison/value operands is used
+        // as a logical AND here; matches upstream WebRTC style.
+        if ((zeros16 < (aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld))
+                & (aecm->nearFilt[i]))
+        {
+            tmp16no1 = WEBRTC_SPL_SHIFT_W16(aecm->nearFilt[i], zeros16);
+            qDomainDiff = zeros16 - aecm->dfaCleanQDomain + aecm->dfaCleanQDomainOld;
+        } else
+        {
+            tmp16no1 = WEBRTC_SPL_SHIFT_W16(aecm->nearFilt[i],
+                                            aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld);
+            qDomainDiff = 0;
+        }
+        // Smooth the near-end magnitude estimate with a 1/16 step.
+        tmp16no2 = WEBRTC_SPL_SHIFT_W16(ptrDfaClean[i], qDomainDiff);
+        tmp32no1 = (WebRtc_Word32)(tmp16no2 - tmp16no1);
+        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 4);
+        tmp16no2 += tmp16no1;
+        zeros16 = WebRtcSpl_NormW16(tmp16no2);
+        // NOTE(review): mixes a value (tmp16no2) and a boolean with bitwise
+        // '&'; non-zero value acts as "true". Saturate if the shift would
+        // overflow 16 bits.
+        if ((tmp16no2) & (-qDomainDiff > zeros16))
+        {
+            aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
+        } else
+        {
+            aecm->nearFilt[i] = WEBRTC_SPL_SHIFT_W16(tmp16no2, -qDomainDiff);
+        }
+
+        // Wiener filter coefficients, resulting hnl in Q14
+        if (echoEst32Gained == 0)
+        {
+            hnl[i] = ONE_Q14;
+        } else if (aecm->nearFilt[i] == 0)
+        {
+            hnl[i] = 0;
+        } else
+        {
+            // Multiply the suppression gain
+            // Rounding
+            echoEst32Gained += (WebRtc_UWord32)(aecm->nearFilt[i] >> 1);
+            tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained, (WebRtc_UWord16)aecm->nearFilt[i]);
+
+            // Current resolution is
+            // Q-(RESOLUTION_CHANNEL + RESOLUTION_SUPGAIN - max(0, 17 - zeros16 - zeros32))
+            // Make sure we are in Q14
+            tmp32no1 = (WebRtc_Word32)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
+            if (tmp32no1 > ONE_Q14)
+            {
+                hnl[i] = 0;
+            } else if (tmp32no1 < 0)
+            {
+                hnl[i] = ONE_Q14;
+            } else
+            {
+                // 1-echoEst/dfa
+                hnl[i] = ONE_Q14 - (WebRtc_Word16)tmp32no1;
+                if (hnl[i] < 0)
+                {
+                    hnl[i] = 0;
+                }
+            }
+        }
+        if (hnl[i])
+        {
+            numPosCoef++;
+        }
+    }
+    // Only in wideband. Prevent the gain in upper band from being larger than
+    // in lower band.
+    if (aecm->mult == 2)
+    {
+        // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause
+        //               speech distortion in double-talk.
+        for (i = 0; i < PART_LEN1; i++)
+        {
+            // Square the gain (Q14 * Q14 >> 14 stays in Q14).
+            hnl[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], hnl[i], 14);
+        }
+
+        for (i = kMinPrefBand; i <= kMaxPrefBand; i++)
+        {
+            avgHnl32 += (WebRtc_Word32)hnl[i];
+        }
+        assert(kMaxPrefBand - kMinPrefBand + 1 > 0);
+        avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1);
+
+        // Cap upper-band gains at the preferred-band average.
+        for (i = kMaxPrefBand; i < PART_LEN1; i++)
+        {
+            if (hnl[i] > (WebRtc_Word16)avgHnl32)
+            {
+                hnl[i] = (WebRtc_Word16)avgHnl32;
+            }
+        }
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+    // measure tick start
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+
+    // Calculate NLP gain, result is in Q14
+    if (aecm->nlpFlag)
+    {
+        for (i = 0; i < PART_LEN1; i++)
+        {
+            // Truncate values close to zero and one.
+            if (hnl[i] > NLP_COMP_HIGH)
+            {
+                hnl[i] = ONE_Q14;
+            } else if (hnl[i] < NLP_COMP_LOW)
+            {
+                hnl[i] = 0;
+            }
+    
+            // Remove outliers
+            if (numPosCoef < 3)
+            {
+                nlpGain = 0;
+            } else
+            {
+                nlpGain = ONE_Q14;
+            }
+
+            // NLP
+            if ((hnl[i] == ONE_Q14) && (nlpGain == ONE_Q14))
+            {
+                hnl[i] = ONE_Q14;
+            } else
+            {
+                hnl[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], nlpGain, 14);
+            }
+
+            // multiply with Wiener coefficients
+            efw[i].real = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real,
+                                                                            hnl[i], 14));
+            efw[i].imag = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag,
+                                                                            hnl[i], 14));
+        }
+    }
+    else
+    {
+        // multiply with Wiener coefficients
+        for (i = 0; i < PART_LEN1; i++)
+        {
+            efw[i].real = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real,
+                                                                           hnl[i], 14));
+            efw[i].imag = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag,
+                                                                           hnl[i], 14));
+        }
+    }
+
+    if (aecm->cngMode == AecmTrue)
+    {
+        ComfortNoise(aecm, ptrDfaClean, efw, hnl);
+    }
+
+#ifdef ARM_WINM_LOG_
+    // measure tick end
+    QueryPerformanceCounter((LARGE_INTEGER*)&end);
+    diff__ = ((end - start) * 1000) / (freq/1000);
+    milliseconds = (unsigned int)(diff__ & 0xffffffff);
+    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
+    // measure tick start
+    QueryPerformanceCounter((LARGE_INTEGER*)&start);
+#endif
+
+    WebRtcAecm_InverseFFTAndWindow(aecm, fft, efw, output, nearendClean);
+
+    return 0;
+}
+
+
+// Generate comfort noise and add to output signal.
+//
+// \param[in]     aecm    Handle of the AECM instance.
+// \param[in]     dfa     Absolute value of the nearend signal (Q[aecm->dfaQDomain]).
+// \param[in,out] out     Complex output signal to which the generated noise is
+//                        added (Q[aecm->dfaQDomain]).
+// \param[in]     lambda  Suppression gain with which to scale the noise level (Q14).
+//
+static void ComfortNoise(AecmCore_t* aecm,
+                         const WebRtc_UWord16* dfa,
+                         complex16_t* out,
+                         const WebRtc_Word16* lambda)
+{
+    WebRtc_Word16 i;
+    WebRtc_Word16 tmp16;
+    WebRtc_Word32 tmp32;
+
+    WebRtc_Word16 randW16[PART_LEN];
+    WebRtc_Word16 uReal[PART_LEN1];
+    WebRtc_Word16 uImag[PART_LEN1];
+    WebRtc_Word32 outLShift32;
+    WebRtc_Word16 noiseRShift16[PART_LEN1];
+
+    WebRtc_Word16 shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain;
+    WebRtc_Word16 minTrackShift;
+
+    assert(shiftFromNearToNoise >= 0);
+    assert(shiftFromNearToNoise < 16);
+
+    if (aecm->noiseEstCtr < 100)
+    {
+        // Track the minimum more quickly initially.
+        aecm->noiseEstCtr++;
+        minTrackShift = 6;
+    } else
+    {
+        minTrackShift = 9;
+    }
+
+    // Estimate noise power.
+    for (i = 0; i < PART_LEN1; i++)
+    {
+
+        // Shift to the noise domain.
+        tmp32 = (WebRtc_Word32)dfa[i];
+        outLShift32 = WEBRTC_SPL_LSHIFT_W32(tmp32, shiftFromNearToNoise);
+
+        if (outLShift32 < aecm->noiseEst[i])
+        {
+            // Reset "too low" counter
+            aecm->noiseEstTooLowCtr[i] = 0;
+            // Track the minimum.
+            if (aecm->noiseEst[i] < (1 << minTrackShift))
+            {
+                // For small values, decrease noiseEst[i] every
+                // |kNoiseEstIncCount| block. The regular approach below can not
+                // go further down due to truncation.
+                aecm->noiseEstTooHighCtr[i]++;
+                if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount)
+                {
+                    aecm->noiseEst[i]--;
+                    aecm->noiseEstTooHighCtr[i] = 0; // Reset the counter
+                }
+            }
+            else
+            {
+                aecm->noiseEst[i] -= ((aecm->noiseEst[i] - outLShift32) >> minTrackShift);
+            }
+        } else
+        {
+            // Reset "too high" counter
+            aecm->noiseEstTooHighCtr[i] = 0;
+            // Ramp slowly upwards until we hit the minimum again.
+            if ((aecm->noiseEst[i] >> 19) > 0)
+            {
+                // Avoid overflow.
+                // Multiplication with 2049 will cause wrap around. Scale
+                // down first and then multiply
+                aecm->noiseEst[i] >>= 11;
+                aecm->noiseEst[i] *= 2049;
+            }
+            else if ((aecm->noiseEst[i] >> 11) > 0)
+            {
+                // Large enough for relative increase
+                aecm->noiseEst[i] *= 2049;
+                aecm->noiseEst[i] >>= 11;
+            }
+            else
+            {
+                // Make incremental increases based on size every
+                // |kNoiseEstIncCount| block
+                aecm->noiseEstTooLowCtr[i]++;
+                if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount)
+                {
+                    aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1;
+                    aecm->noiseEstTooLowCtr[i] = 0; // Reset counter
+                }
+            }
+        }
+    }
+
+    // Convert the noise estimate back to the near-end Q-domain, saturate to
+    // 16 bits, and scale by (1 - lambda) so noise only fills suppressed bins.
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        tmp32 = WEBRTC_SPL_RSHIFT_W32(aecm->noiseEst[i], shiftFromNearToNoise);
+        if (tmp32 > 32767)
+        {
+            tmp32 = 32767;
+            aecm->noiseEst[i] = WEBRTC_SPL_LSHIFT_W32(tmp32, shiftFromNearToNoise);
+        }
+        noiseRShift16[i] = (WebRtc_Word16)tmp32;
+
+        tmp16 = ONE_Q14 - lambda[i];
+        noiseRShift16[i]
+                = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, noiseRShift16[i], 14);
+    }
+
+    // Generate a uniform random array on [0 2^15-1].
+    WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
+
+    // Generate noise according to estimated energy.
+    uReal[0] = 0; // Reject LF noise.
+    uImag[0] = 0;
+    for (i = 1; i < PART_LEN1; i++)
+    {
+        // Get a random index for the cos and sin tables over [0 359].
+        tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(359, randW16[i - 1], 15);
+
+        // Tables are in Q13.
+        uReal[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(noiseRShift16[i],
+                kCosTable[tmp16], 13);
+        uImag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(-noiseRShift16[i],
+                kSinTable[tmp16], 13);
+    }
+    uImag[PART_LEN] = 0;
+
+    // Add the noise to the output with 16-bit saturation; the second variant
+    // is a manually unrolled (x2) loop for the listed embedded targets.
+#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        out[i].real = WEBRTC_SPL_ADD_SAT_W16(out[i].real, uReal[i]);
+        out[i].imag = WEBRTC_SPL_ADD_SAT_W16(out[i].imag, uImag[i]);
+    }
+#else
+    for (i = 0; i < PART_LEN1 -1; )
+    {
+        out[i].real = WEBRTC_SPL_ADD_SAT_W16(out[i].real, uReal[i]);
+        out[i].imag = WEBRTC_SPL_ADD_SAT_W16(out[i].imag, uImag[i]);
+        i++;
+
+        out[i].real = WEBRTC_SPL_ADD_SAT_W16(out[i].real, uReal[i]);
+        out[i].imag = WEBRTC_SPL_ADD_SAT_W16(out[i].imag, uImag[i]);
+        i++;
+    }
+    out[i].real = WEBRTC_SPL_ADD_SAT_W16(out[i].real, uReal[i]);
+    out[i].imag = WEBRTC_SPL_ADD_SAT_W16(out[i].imag, uImag[i]);
+#endif
+}
+
+// Writes |farLen| samples of |farend| into the far-end ring buffer
+// (aecm->farBuf, FAR_BUF_LEN samples), wrapping the write position as
+// needed.
+//
+// \param[in,out] aecm    Handle of the AECM instance.
+// \param[in]     farend  Far-end samples to buffer.
+// \param[in]     farLen  Number of samples to write.
+//
+void WebRtcAecm_BufferFarFrame(AecmCore_t* const aecm,
+                               const WebRtc_Word16* const farend,
+                               const int farLen)
+{
+    int writeLen = farLen, writePos = 0;
+
+    // Check if the write position must be wrapped
+    while (aecm->farBufWritePos + writeLen > FAR_BUF_LEN)
+    {
+        // Write to remaining buffer space before wrapping
+        writeLen = FAR_BUF_LEN - aecm->farBufWritePos;
+        memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+               sizeof(WebRtc_Word16) * writeLen);
+        aecm->farBufWritePos = 0;
+        writePos = writeLen;
+        writeLen = farLen - writeLen;
+    }
+
+    // Write the (remaining) samples that fit without wrapping.
+    memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+           sizeof(WebRtc_Word16) * writeLen);
+    aecm->farBufWritePos += writeLen;
+}
+
+// Reads |farLen| samples from the far-end ring buffer into |farend|,
+// first adjusting the read position by the change in |knownDelay| since
+// the previous call, and wrapping the read position as needed.
+//
+// \param[in,out] aecm        Handle of the AECM instance.
+// \param[out]    farend      Destination for the fetched samples.
+// \param[in]     farLen      Number of samples to read.
+// \param[in]     knownDelay  Current known delay in samples.
+//
+void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, WebRtc_Word16 * const farend,
+                              const int farLen, const int knownDelay)
+{
+    int readLen = farLen;
+    int readPos = 0;
+    int delayChange = knownDelay - aecm->lastKnownDelay;
+
+    // Move the read position back by the delay increase (or forward by a
+    // decrease).
+    aecm->farBufReadPos -= delayChange;
+
+    // Check if delay forces a read position wrap
+    while (aecm->farBufReadPos < 0)
+    {
+        aecm->farBufReadPos += FAR_BUF_LEN;
+    }
+    while (aecm->farBufReadPos > FAR_BUF_LEN - 1)
+    {
+        aecm->farBufReadPos -= FAR_BUF_LEN;
+    }
+
+    aecm->lastKnownDelay = knownDelay;
+
+    // Check if read position must be wrapped
+    while (aecm->farBufReadPos + readLen > FAR_BUF_LEN)
+    {
+
+        // Read from remaining buffer space before wrapping
+        readLen = FAR_BUF_LEN - aecm->farBufReadPos;
+        memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+               sizeof(WebRtc_Word16) * readLen);
+        aecm->farBufReadPos = 0;
+        readPos = readLen;
+        readLen = farLen - readLen;
+    }
+    // Read the (remaining) samples that fit without wrapping.
+    memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+           sizeof(WebRtc_Word16) * readLen);
+    aecm->farBufReadPos += readLen;
+}
+
diff --git a/src/modules/audio_processing/aecm/main/source/aecm_core.h b/src/modules/audio_processing/aecm/aecm_core.h
similarity index 72%
rename from src/modules/audio_processing/aecm/main/source/aecm_core.h
rename to src/modules/audio_processing/aecm/aecm_core.h
index 5defbe4..0ec62ec 100644
--- a/src/modules/audio_processing/aecm/main/source/aecm_core.h
+++ b/src/modules/audio_processing/aecm/aecm_core.h
@@ -17,14 +17,8 @@
 //#define AECM_WITH_ABS_APPROX
 //#define AECM_SHORT                // for 32 sample partition length (otherwise 64)
 
-// TODO(bjornv): These defines will be removed in final version.
-//#define STORE_CHANNEL_DATA
-//#define VAD_DATA
-
 #include "typedefs.h"
 #include "signal_processing_library.h"
-// TODO(bjornv): Will be removed in final version.
-//#include <stdio.h>
 
 // Algorithm parameters
 
@@ -103,6 +97,13 @@
 #define NLP_COMP_LOW    3277            // 0.2 in Q14
 #define NLP_COMP_HIGH   ONE_Q14         // 1 in Q14
 
+extern const WebRtc_Word16 WebRtcAecm_kSqrtHanning[];
+
+typedef struct {
+    WebRtc_Word16 real;
+    WebRtc_Word16 imag;
+} complex16_t;
+
 typedef struct
 {
     int farBufWritePos;
@@ -116,50 +117,60 @@
     void *nearCleanFrameBuf;
     void *outFrameBuf;
 
-    WebRtc_Word16 xBuf[PART_LEN2]; // farend
-    WebRtc_Word16 dBufClean[PART_LEN2]; // nearend
-    WebRtc_Word16 dBufNoisy[PART_LEN2]; // nearend
-    WebRtc_Word16 outBuf[PART_LEN];
-
     WebRtc_Word16 farBuf[FAR_BUF_LEN];
 
     WebRtc_Word16 mult;
     WebRtc_UWord32 seed;
 
     // Delay estimation variables
-    WebRtc_UWord16 medianYlogspec[PART_LEN1];
-    WebRtc_UWord16 medianXlogspec[PART_LEN1];
-    WebRtc_UWord16 medianBCount[MAX_DELAY];
-    WebRtc_UWord16 xfaHistory[PART_LEN1][MAX_DELAY];
-    WebRtc_Word16 delHistoryPos;
-    WebRtc_UWord32 bxHistory[MAX_DELAY];
+    void* delay_estimator;
     WebRtc_UWord16 currentDelay;
-    WebRtc_UWord16 previousDelay;
-    WebRtc_Word16 delayAdjust;
+    // Far end history variables
+    // TODO(bjornv): Replace |far_history| with ring_buffer.
+    uint16_t far_history[PART_LEN1 * MAX_DELAY];
+    int far_history_pos;
+    int far_q_domains[MAX_DELAY];
 
     WebRtc_Word16 nlpFlag;
     WebRtc_Word16 fixedDelay;
 
     WebRtc_UWord32 totCount;
 
-    WebRtc_Word16 xfaQDomainBuf[MAX_DELAY];
     WebRtc_Word16 dfaCleanQDomain;
     WebRtc_Word16 dfaCleanQDomainOld;
     WebRtc_Word16 dfaNoisyQDomain;
     WebRtc_Word16 dfaNoisyQDomainOld;
 
     WebRtc_Word16 nearLogEnergy[MAX_BUF_LEN];
-    WebRtc_Word16 farLogEnergy[MAX_BUF_LEN];
+    WebRtc_Word16 farLogEnergy;
     WebRtc_Word16 echoAdaptLogEnergy[MAX_BUF_LEN];
     WebRtc_Word16 echoStoredLogEnergy[MAX_BUF_LEN];
 
-    WebRtc_Word16 channelAdapt16[PART_LEN1];
-    WebRtc_Word32 channelAdapt32[PART_LEN1];
-    WebRtc_Word16 channelStored[PART_LEN1];
+    // The extra 16 or 32 bytes in the following buffers are for alignment based Neon code.
+    // It's designed this way since the current GCC compiler can't align a buffer in 16 or 32
+    // byte boundaries properly.
+    WebRtc_Word16 channelStored_buf[PART_LEN1 + 8];
+    WebRtc_Word16 channelAdapt16_buf[PART_LEN1 + 8];
+    WebRtc_Word32 channelAdapt32_buf[PART_LEN1 + 8];
+    WebRtc_Word16 xBuf_buf[PART_LEN2 + 16]; // farend
+    WebRtc_Word16 dBufClean_buf[PART_LEN2 + 16]; // nearend
+    WebRtc_Word16 dBufNoisy_buf[PART_LEN2 + 16]; // nearend
+    WebRtc_Word16 outBuf_buf[PART_LEN + 8];
+
+    // Pointers to the above buffers
+    WebRtc_Word16 *channelStored;
+    WebRtc_Word16 *channelAdapt16;
+    WebRtc_Word32 *channelAdapt32;
+    WebRtc_Word16 *xBuf;
+    WebRtc_Word16 *dBufClean;
+    WebRtc_Word16 *dBufNoisy;
+    WebRtc_Word16 *outBuf;
+
     WebRtc_Word32 echoFilt[PART_LEN1];
     WebRtc_Word16 nearFilt[PART_LEN1];
     WebRtc_Word32 noiseEst[PART_LEN1];
-    WebRtc_Word16 noiseEstQDomain[PART_LEN1];
+    int           noiseEstTooLowCtr[PART_LEN1];
+    int           noiseEstTooHighCtr[PART_LEN1];
     WebRtc_Word16 noiseEstCtr;
     WebRtc_Word16 cngMode;
 
@@ -172,46 +183,19 @@
     WebRtc_Word16 farEnergyMaxMin;
     WebRtc_Word16 farEnergyVAD;
     WebRtc_Word16 farEnergyMSE;
-    WebRtc_Word16 currentVADValue;
+    int currentVADValue;
     WebRtc_Word16 vadUpdateCount;
 
-    WebRtc_Word16 delayHistogram[MAX_DELAY];
-    WebRtc_Word16 delayVadCount;
-    WebRtc_Word16 maxDelayHistIdx;
-    WebRtc_Word16 lastMinPos;
-
     WebRtc_Word16 startupState;
     WebRtc_Word16 mseChannelCount;
-    WebRtc_Word16 delayCount;
-    WebRtc_Word16 newDelayCorrData;
-    WebRtc_Word16 lastDelayUpdateCount;
-    WebRtc_Word16 delayCorrelation[CORR_BUF_LEN];
     WebRtc_Word16 supGain;
     WebRtc_Word16 supGainOld;
-    WebRtc_Word16 delayOffsetFlag;
 
     WebRtc_Word16 supGainErrParamA;
     WebRtc_Word16 supGainErrParamD;
     WebRtc_Word16 supGainErrParamDiffAB;
     WebRtc_Word16 supGainErrParamDiffBD;
 
-    // TODO(bjornv): Will be removed after final version has been committed.
-#ifdef VAD_DATA
-    FILE *vad_file;
-    FILE *delay_file;
-    FILE *far_file;
-    FILE *far_cur_file;
-    FILE *far_min_file;
-    FILE *far_max_file;
-    FILE *far_vad_file;
-#endif
-
-    // TODO(bjornv): Will be removed after final version has been committed.
-#ifdef STORE_CHANNEL_DATA
-    FILE *channel_file;
-    FILE *channel_file_init;
-#endif
-
 #ifdef AEC_DEBUG
     FILE *farFile;
     FILE *nearFile;
@@ -265,7 +249,20 @@
 //
 int WebRtcAecm_FreeCore(AecmCore_t *aecm);
 
-int WebRtcAecm_Control(AecmCore_t *aecm, int delay, int nlpFlag, int delayOffsetFlag);
+int WebRtcAecm_Control(AecmCore_t *aecm, int delay, int nlpFlag);
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_InitEchoPathCore(...)
+//
+// This function resets the echo channel adaptation with the specified channel.
+// Input:
+//      - aecm          : Pointer to the AECM instance
+//      - echo_path     : Pointer to the data that should initialize the echo path
+//
+// Output:
+//      - aecm          : Initialized instance
+//
+void WebRtcAecm_InitEchoPathCore(AecmCore_t* aecm, const WebRtc_Word16* echo_path);
 
 ///////////////////////////////////////////////////////////////////////////////////////////////
 // WebRtcAecm_ProcessFrame(...)
@@ -282,10 +279,10 @@
 //      - out           : Out buffer, one frame of nearend signal          :
 //
 //
-void WebRtcAecm_ProcessFrame(AecmCore_t * const aecm, const WebRtc_Word16 * const farend,
-                             const WebRtc_Word16 * const nearendNoisy,
-                             const WebRtc_Word16 * const nearendClean,
-                             WebRtc_Word16 * const out);
+int WebRtcAecm_ProcessFrame(AecmCore_t * aecm, const WebRtc_Word16 * farend,
+                            const WebRtc_Word16 * nearendNoisy,
+                            const WebRtc_Word16 * nearendClean,
+                            WebRtc_Word16 * out);
 
 ///////////////////////////////////////////////////////////////////////////////////////////////
 // WebRtcAecm_ProcessBlock(...)
@@ -303,10 +300,10 @@
 //      - out           : Out buffer, one block of nearend signal          :
 //
 //
-void WebRtcAecm_ProcessBlock(AecmCore_t * const aecm, const WebRtc_Word16 * const farend,
-                                const WebRtc_Word16 * const nearendNoisy,
-                                const WebRtc_Word16 * const noisyClean,
-                                WebRtc_Word16 * const out);
+int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, const WebRtc_Word16 * farend,
+                            const WebRtc_Word16 * nearendNoisy,
+                            const WebRtc_Word16 * noisyClean,
+                            WebRtc_Word16 * out);
 
 ///////////////////////////////////////////////////////////////////////////////////////////////
 // WebRtcAecm_BufferFarFrame()
@@ -335,4 +332,44 @@
 void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, WebRtc_Word16 * const farend,
                               const int farLen, const int knownDelay);
 
+///////////////////////////////////////////////////////////////////////////////
+// Some function pointers, for internal functions shared by ARM NEON and 
+// generic C code.
+//
+typedef void (*CalcLinearEnergies)(
+    AecmCore_t* aecm,
+    const WebRtc_UWord16* far_spectrum,
+    WebRtc_Word32* echoEst,
+    WebRtc_UWord32* far_energy,
+    WebRtc_UWord32* echo_energy_adapt,
+    WebRtc_UWord32* echo_energy_stored);
+extern CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+
+typedef void (*StoreAdaptiveChannel)(
+    AecmCore_t* aecm,
+    const WebRtc_UWord16* far_spectrum,
+    WebRtc_Word32* echo_est);
+extern StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+
+typedef void (*ResetAdaptiveChannel)(AecmCore_t* aecm);
+extern ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+
+typedef void (*WindowAndFFT)(
+    WebRtc_Word16* fft,
+    const WebRtc_Word16* time_signal,
+    complex16_t* freq_signal,
+    int time_signal_scaling);
+extern WindowAndFFT WebRtcAecm_WindowAndFFT;
+
+typedef void (*InverseFFTAndWindow)(
+    AecmCore_t* aecm,
+    WebRtc_Word16* fft, complex16_t* efw,
+    WebRtc_Word16* output,
+    const WebRtc_Word16* nearendClean);
+extern InverseFFTAndWindow WebRtcAecm_InverseFFTAndWindow;
+
+// Initialization of the above function pointers for ARM Neon.
+void WebRtcAecm_InitNeon(void);
+
+
 #endif
diff --git a/src/modules/audio_processing/aecm/aecm_core_neon.c b/src/modules/audio_processing/aecm/aecm_core_neon.c
new file mode 100644
index 0000000..ab448b4
--- /dev/null
+++ b/src/modules/audio_processing/aecm/aecm_core_neon.c
@@ -0,0 +1,303 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "aecm_core.h"
+
+#include <arm_neon.h>
+#include <assert.h>
+
+
+// Square root of Hanning window in Q14.
+static const WebRtc_Word16 kSqrtHanningReversed[] __attribute__((aligned(8))) = {
+  16384, 16373, 16354, 16325, 
+  16286, 16237, 16179, 16111,
+  16034, 15947, 15851, 15746,
+  15631, 15506, 15373, 15231,
+  15079, 14918, 14749, 14571,
+  14384, 14189, 13985, 13773,
+  13553, 13325, 13089, 12845,
+  12594, 12335, 12068, 11795,
+  11514, 11227, 10933, 10633,
+  10326, 10013, 9695,  9370,
+  9040,  8705,  8364,  8019,
+  7668,  7313,  6954,  6591,
+  6224,  5853,  5478,  5101,
+  4720,  4337,  3951,  3562,
+  3172,  2780,  2386,  1990,
+  1594,  1196,  798,   399
+};
+
+static void WindowAndFFTNeon(WebRtc_Word16* fft,
+                             const WebRtc_Word16* time_signal,
+                             complex16_t* freq_signal,
+                             int time_signal_scaling) {
+  int i, j;
+
+  int16x4_t tmp16x4_scaling = vdup_n_s16(time_signal_scaling);
+  __asm__("vmov.i16 d21, #0" ::: "d21");
+
+  for (i = 0, j = 0; i < PART_LEN; i += 4, j += 8) {
+    int16x4_t tmp16x4_0;
+    int16x4_t tmp16x4_1;
+    int32x4_t tmp32x4_0;
+
+    /* Window near end */
+    // fft[j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((time_signal[i]
+    //       << time_signal_scaling), WebRtcAecm_kSqrtHanning[i], 14);
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&time_signal[i]));
+    tmp16x4_0 = vshl_s16(tmp16x4_0, tmp16x4_scaling);
+
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_1) : "r"(&WebRtcAecm_kSqrtHanning[i]));
+    tmp32x4_0 = vmull_s16(tmp16x4_0, tmp16x4_1);
+
+    __asm__("vshrn.i32 d20, %q0, #14" : : "w"(tmp32x4_0) : "d20");
+    __asm__("vst2.16 {d20, d21}, [%0, :128]" : : "r"(&fft[j]) : "q10");
+
+    // fft[PART_LEN2 + j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(
+    //      (time_signal[PART_LEN + i] << time_signal_scaling),
+    //       WebRtcAecm_kSqrtHanning[PART_LEN - i], 14);
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&time_signal[i + PART_LEN]));
+    tmp16x4_0 = vshl_s16(tmp16x4_0, tmp16x4_scaling);
+
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_1) : "r"(&kSqrtHanningReversed[i]));
+    tmp32x4_0 = vmull_s16(tmp16x4_0, tmp16x4_1);
+
+    __asm__("vshrn.i32 d20, %q0, #14" : : "w"(tmp32x4_0) : "d20");
+    __asm__("vst2.16 {d20, d21}, [%0, :128]" : : "r"(&fft[PART_LEN2 + j]) : "q10");
+  }
+
+  WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
+  WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
+
+  // Take only the first PART_LEN2 samples, and switch the sign of the imaginary part.
+  for (i = 0, j = 0; j < PART_LEN2; i += 8, j += 16) {
+    __asm__("vld2.16 {d20, d21, d22, d23}, [%0, :256]" : : "r"(&fft[j]) : "q10", "q11");
+    __asm__("vneg.s16 d22, d22" : : : "q11"); // d22 is the low half of q11, not q10
+    __asm__("vneg.s16 d23, d23" : : : "q11");
+    __asm__("vst2.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&freq_signal[i].real): "q10", "q11");
+  }
+}
+
+static void InverseFFTAndWindowNeon(AecmCore_t* aecm,
+                                    WebRtc_Word16* fft,
+                                    complex16_t* efw,
+                                    WebRtc_Word16* output,
+                                    const WebRtc_Word16* nearendClean) {
+  int i, j, outCFFT;
+  WebRtc_Word32 tmp32no1;
+
+  // Synthesis
+  for (i = 0, j = 0; i < PART_LEN; i += 4, j += 8) {
+    // We overwrite two more elements in fft[], but it's ok.
+    __asm__("vld2.16 {d20, d21}, [%0, :128]" : : "r"(&(efw[i].real)) : "q10");
+    __asm__("vmov q11, q10" : : : "q10", "q11");
+
+    __asm__("vneg.s16 d23, d23" : : : "q11");
+    __asm__("vst2.16 {d22, d23}, [%0, :128]" : : "r"(&fft[j]): "q11");
+
+    __asm__("vrev64.16 q10, q10" : : : "q10");
+    __asm__("vst2.16 {d20, d21}, [%0]" : : "r"(&fft[PART_LEN4 - j - 6]): "q10");
+  }
+
+  fft[PART_LEN2] = efw[PART_LEN].real;
+  fft[PART_LEN2 + 1] = -efw[PART_LEN].imag;
+
+  // Inverse FFT, result should be scaled with outCFFT.
+  WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
+  outCFFT = WebRtcSpl_ComplexIFFT(fft, PART_LEN_SHIFT, 1);
+
+  // Take only the real values and scale with outCFFT.
+  for (i = 0, j = 0; i < PART_LEN2; i += 8, j += 16) {
+    __asm__("vld2.16 {d20, d21, d22, d23}, [%0, :256]" : : "r"(&fft[j]) : "q10", "q11");
+    __asm__("vst1.16 {d20, d21}, [%0, :128]" : : "r"(&fft[i]): "q10");
+  }
+
+  int32x4_t tmp32x4_2;
+  __asm__("vdup.32 %q0, %1" : "=w"(tmp32x4_2) : "r"((WebRtc_Word32)
+      (outCFFT - aecm->dfaCleanQDomain)));
+  for (i = 0; i < PART_LEN; i += 4) {
+    int16x4_t tmp16x4_0;
+    int16x4_t tmp16x4_1;
+    int32x4_t tmp32x4_0;
+    int32x4_t tmp32x4_1;
+
+    // fft[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+    //        fft[i], WebRtcAecm_kSqrtHanning[i], 14);
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&fft[i]));
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_1) : "r"(&WebRtcAecm_kSqrtHanning[i]));
+    __asm__("vmull.s16 %q0, %P1, %P2" : "=w"(tmp32x4_0) : "w"(tmp16x4_0), "w"(tmp16x4_1));
+    __asm__("vrshr.s32 %q0, %q1, #14" : "=w"(tmp32x4_0) : "0"(tmp32x4_0));
+
+    // tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)fft[i],
+    //        outCFFT - aecm->dfaCleanQDomain);
+    __asm__("vshl.s32 %q0, %q1, %q2" : "=w"(tmp32x4_0) : "0"(tmp32x4_0), "w"(tmp32x4_2));
+
+    // fft[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+    //        tmp32no1 + outBuf[i], WEBRTC_SPL_WORD16_MIN);
+    // output[i] = fft[i];
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&aecm->outBuf[i]));
+    __asm__("vmovl.s16 %q0, %P1" : "=w"(tmp32x4_1) : "w"(tmp16x4_0));
+    __asm__("vadd.i32 %q0, %q1" : : "w"(tmp32x4_0), "w"(tmp32x4_1));
+    __asm__("vqshrn.s32 %P0, %q1, #0" : "=w"(tmp16x4_0) : "w"(tmp32x4_0));
+    __asm__("vst1.16 %P0, [%1, :64]" : : "w"(tmp16x4_0), "r"(&fft[i]));
+    __asm__("vst1.16 %P0, [%1, :64]" : : "w"(tmp16x4_0), "r"(&output[i]));
+
+    // tmp32no1 = WEBRTC_SPL_MUL_16_16_RSFT(
+    //        fft[PART_LEN + i], WebRtcAecm_kSqrtHanning[PART_LEN - i], 14);
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_0) : "r"(&fft[PART_LEN + i]));
+    __asm__("vld1.16 %P0, [%1, :64]" : "=w"(tmp16x4_1) : "r"(&kSqrtHanningReversed[i]));
+    __asm__("vmull.s16 %q0, %P1, %P2" : "=w"(tmp32x4_0) : "w"(tmp16x4_0), "w"(tmp16x4_1));
+    __asm__("vshr.s32 %q0, %q1, #14" : "=w"(tmp32x4_0) : "0"(tmp32x4_0));
+
+    // tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, outCFFT - aecm->dfaCleanQDomain);
+    __asm__("vshl.s32 %q0, %q1, %q2" : "=w"(tmp32x4_0) : "0"(tmp32x4_0), "w"(tmp32x4_2));
+    // outBuf[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(
+    //        WEBRTC_SPL_WORD16_MAX, tmp32no1, WEBRTC_SPL_WORD16_MIN);
+    __asm__("vqshrn.s32 %P0, %q1, #0" : "=w"(tmp16x4_0) : "w"(tmp32x4_0));
+    __asm__("vst1.16 %P0, [%1, :64]" : : "w"(tmp16x4_0), "r"(&aecm->outBuf[i]));
+  }
+
+  // Copy the current block to the old position (outBuf is shifted elsewhere).
+  for (i = 0; i < PART_LEN; i += 16) {
+    __asm__("vld1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&aecm->xBuf[i + PART_LEN]) : "q10");
+    __asm__("vst1.16 {d20, d21, d22, d23}, [%0, :256]" : : "r"(&aecm->xBuf[i]): "q10");
+  }
+  for (i = 0; i < PART_LEN; i += 16) {
+    __asm__("vld1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&aecm->dBufNoisy[i + PART_LEN]) : "q10");
+    __asm__("vst1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&aecm->dBufNoisy[i]): "q10");
+  }
+  if (nearendClean != NULL) {
+    for (i = 0; i < PART_LEN; i += 16) {
+      __asm__("vld1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+              "r"(&aecm->dBufClean[i + PART_LEN]) : "q10");
+      __asm__("vst1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+              "r"(&aecm->dBufClean[i]): "q10");
+    }
+  }
+}
+
+static void CalcLinearEnergiesNeon(AecmCore_t* aecm,
+                                   const WebRtc_UWord16* far_spectrum,
+                                   WebRtc_Word32* echo_est,
+                                   WebRtc_UWord32* far_energy,
+                                   WebRtc_UWord32* echo_energy_adapt,
+                                   WebRtc_UWord32* echo_energy_stored) {
+  int i;
+
+  register WebRtc_UWord32 far_energy_r;
+  register WebRtc_UWord32 echo_energy_stored_r;
+  register WebRtc_UWord32 echo_energy_adapt_r;
+  uint32x4_t tmp32x4_0;
+
+  __asm__("vmov.i32 q14, #0" : : : "q14"); // far_energy
+  __asm__("vmov.i32 q8,  #0" : : : "q8"); // echo_energy_stored
+  __asm__("vmov.i32 q9,  #0" : : : "q9"); // echo_energy_adapt
+
+  for (i = 0; i < PART_LEN - 7; i += 8) {
+    // far_energy += (WebRtc_UWord32)(far_spectrum[i]);
+    __asm__("vld1.16 {d26, d27}, [%0]" : : "r"(&far_spectrum[i]) : "q13");
+    __asm__("vaddw.u16 q14, q14, d26" : : : "q14", "q13");
+    __asm__("vaddw.u16 q14, q14, d27" : : : "q14", "q13");
+
+    // Get estimated echo energies for adaptive channel and stored channel.
+    // echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+    __asm__("vld1.16 {d24, d25}, [%0, :128]" : : "r"(&aecm->channelStored[i]) : "q12");
+    __asm__("vmull.u16 q10, d26, d24" : : : "q12", "q13", "q10");
+    __asm__("vmull.u16 q11, d27, d25" : : : "q12", "q13", "q11");
+    __asm__("vst1.32 {d20, d21, d22, d23}, [%0, :256]" : : "r"(&echo_est[i]):
+            "q10", "q11");
+
+    // echo_energy_stored += (WebRtc_UWord32)echoEst[i];
+    __asm__("vadd.u32 q8, q10" : : : "q10", "q8");
+    __asm__("vadd.u32 q8, q11" : : : "q11", "q8");
+
+    // echo_energy_adapt += WEBRTC_SPL_UMUL_16_16(
+    //     aecm->channelAdapt16[i], far_spectrum[i]);
+    __asm__("vld1.16 {d24, d25}, [%0, :128]" : : "r"(&aecm->channelAdapt16[i]) : "q12");
+    __asm__("vmull.u16 q10, d26, d24" : : : "q12", "q13", "q10");
+    __asm__("vmull.u16 q11, d27, d25" : : : "q12", "q13", "q11");
+    __asm__("vadd.u32 q9, q10" : : : "q9", "q15");
+    __asm__("vadd.u32 q9, q11" : : : "q9", "q11");
+  }
+
+  __asm__("vadd.u32 d28, d29" : : : "q14");
+  __asm__("vpadd.u32 d28, d28" : : : "q14");
+  __asm__("vmov.32 %0, d28[0]" : "=r"(far_energy_r): : "q14");
+
+  __asm__("vadd.u32 d18, d19" : : : "q9");
+  __asm__("vpadd.u32 d18, d18" : : : "q9");
+  __asm__("vmov.32 %0, d18[0]" : "=r"(echo_energy_adapt_r): : "q9");
+
+  __asm__("vadd.u32 d16, d17" : : : "q8");
+  __asm__("vpadd.u32 d16, d16" : : : "q8");
+  __asm__("vmov.32 %0, d16[0]" : "=r"(echo_energy_stored_r): : "q8");
+
+  // Get estimated echo energies for adaptive channel and stored channel.
+  echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+  *echo_energy_stored = echo_energy_stored_r + (WebRtc_UWord32)echo_est[i];
+  *far_energy = far_energy_r + (WebRtc_UWord32)(far_spectrum[i]);
+  *echo_energy_adapt = echo_energy_adapt_r + WEBRTC_SPL_UMUL_16_16(
+      aecm->channelAdapt16[i], far_spectrum[i]);
+}
+
+static void StoreAdaptiveChannelNeon(AecmCore_t* aecm,
+                                     const WebRtc_UWord16* far_spectrum,
+                                     WebRtc_Word32* echo_est) {
+  int i;
+
+  // During startup we store the channel every block.
+  // Recalculate echo estimate.
+  for (i = 0; i < PART_LEN - 7; i += 8) {
+    // aecm->channelStored[i] = aecm->channelAdapt16[i];
+    // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+    __asm__("vld1.16 {d26, d27}, [%0]" : : "r"(&far_spectrum[i]) : "q13");
+    __asm__("vld1.16 {d24, d25}, [%0, :128]" : : "r"(&aecm->channelAdapt16[i]) : "q12");
+    __asm__("vst1.16 {d24, d25}, [%0, :128]" : : "r"(&aecm->channelStored[i]) : "q12");
+    __asm__("vmull.u16 q10, d26, d24" : : : "q12", "q13", "q10");
+    __asm__("vmull.u16 q11, d27, d25" : : : "q12", "q13", "q11");
+    __asm__("vst1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&echo_est[i]) : "q10", "q11");
+  }
+  aecm->channelStored[i] = aecm->channelAdapt16[i];
+  echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], far_spectrum[i]);
+}
+
+static void ResetAdaptiveChannelNeon(AecmCore_t* aecm) {
+  int i;
+
+  for (i = 0; i < PART_LEN - 7; i += 8) {
+    // aecm->channelAdapt16[i] = aecm->channelStored[i];
+    // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)
+    //                           aecm->channelStored[i], 16);
+    __asm__("vld1.16 {d24, d25}, [%0, :128]" : :
+            "r"(&aecm->channelStored[i]) : "q12");
+    __asm__("vst1.16 {d24, d25}, [%0, :128]" : :
+            "r"(&aecm->channelAdapt16[i]) : "q12");
+    __asm__("vshll.s16 q10, d24, #16" : : : "q12", "q13", "q10");
+    __asm__("vshll.s16 q11, d25, #16" : : : "q12", "q13", "q11");
+    __asm__("vst1.16 {d20, d21, d22, d23}, [%0, :256]" : :
+            "r"(&aecm->channelAdapt32[i]): "q10", "q11");
+  }
+  aecm->channelAdapt16[i] = aecm->channelStored[i];
+  aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
+      (WebRtc_Word32)aecm->channelStored[i], 16);
+}
+
+void WebRtcAecm_InitNeon(void) {
+  WebRtcAecm_WindowAndFFT = WindowAndFFTNeon;
+  WebRtcAecm_InverseFFTAndWindow = InverseFFTAndWindowNeon;
+  WebRtcAecm_CalcLinearEnergies = CalcLinearEnergiesNeon;
+  WebRtcAecm_StoreAdaptiveChannel = StoreAdaptiveChannelNeon;
+  WebRtcAecm_ResetAdaptiveChannel = ResetAdaptiveChannelNeon;
+}
diff --git a/src/modules/audio_processing/aecm/main/source/echo_control_mobile.c b/src/modules/audio_processing/aecm/echo_control_mobile.c
similarity index 83%
rename from src/modules/audio_processing/aecm/main/source/echo_control_mobile.c
rename to src/modules/audio_processing/aecm/echo_control_mobile.c
index f9d84f0..49798b7 100644
--- a/src/modules/audio_processing/aecm/main/source/echo_control_mobile.c
+++ b/src/modules/audio_processing/aecm/echo_control_mobile.c
@@ -31,7 +31,7 @@
 // The factor of 2 handles wb, and the + 1 is as a safety margin
 #define MAX_RESAMP_LEN (5 * FRAME_LEN)
 
-static const int kBufSizeSamp = BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples)
+static const size_t kBufSizeSamp = BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples)
 static const int kSampMsNb = 8; // samples per ms in nb
 // Target suppression levels for nlp modes
 // log{0.001, 0.00001, 0.00000001}
@@ -108,7 +108,8 @@
         return -1;
     }
 
-    if (WebRtcApm_CreateBuffer(&aecm->farendBuf, kBufSizeSamp) == -1)
+    if (WebRtc_CreateBuffer(&aecm->farendBuf, kBufSizeSamp,
+                            sizeof(int16_t)) == -1)
     {
         WebRtcAecm_Free(aecm);
         aecm = NULL;
@@ -153,13 +154,13 @@
     fclose(aecm->postCompFile);
 #endif // AEC_DEBUG
     WebRtcAecm_FreeCore(aecm->aecmCore);
-    WebRtcApm_FreeBuffer(aecm->farendBuf);
+    WebRtc_FreeBuffer(aecm->farendBuf);
     free(aecm);
 
     return 0;
 }
 
-WebRtc_Word32 WebRtcAecm_Init(void *aecmInst, WebRtc_Word32 sampFreq, WebRtc_Word32 scSampFreq)
+WebRtc_Word32 WebRtcAecm_Init(void *aecmInst, WebRtc_Word32 sampFreq)
 {
     aecmob_t *aecm = aecmInst;
     AecmConfig aecConfig;
@@ -176,13 +177,6 @@
     }
     aecm->sampFreq = sampFreq;
 
-    if (scSampFreq < 1 || scSampFreq > 96000)
-    {
-        aecm->lastError = AECM_BAD_PARAMETER_ERROR;
-        return -1;
-    }
-    aecm->scSampFreq = scSampFreq;
-
     // Initialize AECM core
     if (WebRtcAecm_InitCore(aecm->aecmCore, aecm->sampFreq) == -1)
     {
@@ -191,7 +185,7 @@
     }
 
     // Initialize farend buffer
-    if (WebRtcApm_InitBuffer(aecm->farendBuf) == -1)
+    if (WebRtc_InitBuffer(aecm->farendBuf) == -1)
     {
         aecm->lastError = AECM_UNSPECIFIED_ERROR;
         return -1;
@@ -264,7 +258,7 @@
         WebRtcAecm_DelayComp(aecm);
     }
 
-    WebRtcApm_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
+    WebRtc_WriteBuffer(aecm->farendBuf, farend, (size_t) nrOfSamples);
 
     return retVal;
 }
@@ -276,7 +270,6 @@
     aecmob_t *aecm = aecmInst;
     WebRtc_Word32 retVal = 0;
     short i;
-    short farend[FRAME_LEN];
     short nmbrOfFilledBuffers;
     short nBlocks10ms;
     short nFrames;
@@ -352,7 +345,8 @@
             memcpy(out, nearendClean, sizeof(short) * nrOfSamples);
         }
 
-        nmbrOfFilledBuffers = WebRtcApm_get_buffer_size(aecm->farendBuf) / FRAME_LEN;
+        nmbrOfFilledBuffers =
+            (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
         // The AECM is in the start up mode
         // AECM is disabled until the soundcard buffer and farend buffers are OK
 
@@ -414,10 +408,9 @@
                 aecm->ECstartup = 0; // Enable the AECM
             } else if (nmbrOfFilledBuffers > aecm->bufSizeStart)
             {
-                WebRtcApm_FlushBuffer(
-                                       aecm->farendBuf,
-                                       WebRtcApm_get_buffer_size(aecm->farendBuf)
-                                               - aecm->bufSizeStart * FRAME_LEN);
+                WebRtc_MoveReadPtr(aecm->farendBuf,
+                                   (int) WebRtc_available_read(aecm->farendBuf)
+                                   - (int) aecm->bufSizeStart * FRAME_LEN);
                 aecm->ECstartup = 0;
             }
         }
@@ -429,20 +422,27 @@
         // Note only 1 block supported for nb and 2 blocks for wb
         for (i = 0; i < nFrames; i++)
         {
-            nmbrOfFilledBuffers = WebRtcApm_get_buffer_size(aecm->farendBuf) / FRAME_LEN;
+            int16_t farend[FRAME_LEN];
+            const int16_t* farend_ptr = NULL;
+
+            nmbrOfFilledBuffers =
+                (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
 
             // Check that there is data in the far end buffer
             if (nmbrOfFilledBuffers > 0)
             {
                 // Get the next 80 samples from the farend buffer
-                WebRtcApm_ReadBuffer(aecm->farendBuf, farend, FRAME_LEN);
+                WebRtc_ReadBuffer(aecm->farendBuf, (void**) &farend_ptr, farend,
+                                  FRAME_LEN);
 
                 // Always store the last frame for use when we run out of data
-                memcpy(&(aecm->farendOld[i][0]), farend, FRAME_LEN * sizeof(short));
+                memcpy(&(aecm->farendOld[i][0]), farend_ptr,
+                       FRAME_LEN * sizeof(short));
             } else
             {
                 // We have no data so we use the last played frame
                 memcpy(farend, &(aecm->farendOld[i][0]), FRAME_LEN * sizeof(short));
+                farend_ptr = farend;
             }
 
             // Call buffer delay estimator when all data is extracted,
@@ -465,12 +465,24 @@
              &out[FRAME_LEN * i], aecm->knownDelay);*/
             if (nearendClean == NULL)
             {
-                WebRtcAecm_ProcessFrame(aecm->aecmCore, farend, &nearendNoisy[FRAME_LEN * i],
-                                        NULL, &out[FRAME_LEN * i]);
+                if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
+                                            farend_ptr,
+                                            &nearendNoisy[FRAME_LEN * i],
+                                            NULL,
+                                            &out[FRAME_LEN * i]) == -1)
+                {
+                    return -1;
+                }
             } else
             {
-                WebRtcAecm_ProcessFrame(aecm->aecmCore, farend, &nearendNoisy[FRAME_LEN * i],
-                                        &nearendClean[FRAME_LEN * i], &out[FRAME_LEN * i]);
+                if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
+                                            farend_ptr,
+                                            &nearendNoisy[FRAME_LEN * i],
+                                            &nearendClean[FRAME_LEN * i],
+                                            &out[FRAME_LEN * i]) == -1)
+                {
+                    return -1;
+                }
             }
 
 #ifdef ARM_WINM_LOG
@@ -510,7 +522,8 @@
     }
 
 #ifdef AEC_DEBUG
-    msInAECBuf = WebRtcApm_get_buffer_size(aecm->farendBuf) / (kSampMsNb*aecm->aecmCore->mult);
+    msInAECBuf = (short) WebRtc_available_read(aecm->farendBuf) /
+        (kSampMsNb * aecm->aecmCore->mult);
     fwrite(&msInAECBuf, 2, 1, aecm->bufFile);
     fwrite(&(aecm->knownDelay), sizeof(aecm->knownDelay), 1, aecm->delayFile);
 #endif
@@ -627,6 +640,68 @@
     return 0;
 }
 
+WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst,
+                                      const void* echo_path,
+                                      size_t size_bytes)
+{
+    aecmob_t *aecm = aecmInst;
+    const WebRtc_Word16* echo_path_ptr = echo_path;
+
+    if (aecm == NULL) return -1; // cannot record lastError on a NULL instance
+    if (echo_path == NULL) {
+        aecm->lastError = AECM_NULL_POINTER_ERROR;
+        return -1;
+    }
+    if (size_bytes != WebRtcAecm_echo_path_size_bytes())
+    {
+        // Input channel size does not match the size of AECM
+        aecm->lastError = AECM_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    if (aecm->initFlag != kInitCheck)
+    {
+        aecm->lastError = AECM_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    WebRtcAecm_InitEchoPathCore(aecm->aecmCore, echo_path_ptr);
+
+    return 0;
+}
+
+WebRtc_Word32 WebRtcAecm_GetEchoPath(void* aecmInst,
+                                     void* echo_path,
+                                     size_t size_bytes)
+{
+    aecmob_t *aecm = aecmInst;
+    WebRtc_Word16* echo_path_ptr = echo_path;
+
+    if (aecm == NULL) return -1; // cannot record lastError on a NULL instance
+    if (echo_path == NULL) {
+        aecm->lastError = AECM_NULL_POINTER_ERROR;
+        return -1;
+    }
+    if (size_bytes != WebRtcAecm_echo_path_size_bytes())
+    {
+        // Input channel size does not match the size of AECM
+        aecm->lastError = AECM_BAD_PARAMETER_ERROR;
+        return -1;
+    }
+    if (aecm->initFlag != kInitCheck)
+    {
+        aecm->lastError = AECM_UNINITIALIZED_ERROR;
+        return -1;
+    }
+
+    memcpy(echo_path_ptr, aecm->aecmCore->channelStored, size_bytes);
+    return 0;
+}
+
+size_t WebRtcAecm_echo_path_size_bytes()
+{
+    return (PART_LEN1 * sizeof(WebRtc_Word16));
+}
+
 WebRtc_Word32 WebRtcAecm_get_version(WebRtc_Word8 *versionStr, WebRtc_Word16 len)
 {
     const char version[] = "AECM 1.2.0";
@@ -660,17 +735,17 @@
 
 static int WebRtcAecm_EstBufDelay(aecmob_t *aecm, short msInSndCardBuf)
 {
-    short delayNew, nSampFar, nSampSndCard;
+    short delayNew, nSampSndCard;
+    short nSampFar = (short) WebRtc_available_read(aecm->farendBuf);
     short diff;
 
-    nSampFar = WebRtcApm_get_buffer_size(aecm->farendBuf);
     nSampSndCard = msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
 
     delayNew = nSampSndCard - nSampFar;
 
     if (delayNew < FRAME_LEN)
     {
-        WebRtcApm_FlushBuffer(aecm->farendBuf, FRAME_LEN);
+        WebRtc_MoveReadPtr(aecm->farendBuf, FRAME_LEN);
         delayNew += FRAME_LEN;
     }
 
@@ -710,10 +785,10 @@
 
 static int WebRtcAecm_DelayComp(aecmob_t *aecm)
 {
-    int nSampFar, nSampSndCard, delayNew, nSampAdd;
+    int nSampFar = (int) WebRtc_available_read(aecm->farendBuf);
+    int nSampSndCard, delayNew, nSampAdd;
     const int maxStuffSamp = 10 * FRAME_LEN;
 
-    nSampFar = WebRtcApm_get_buffer_size(aecm->farendBuf);
     nSampSndCard = aecm->msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
     delayNew = nSampSndCard - nSampFar;
 
@@ -725,7 +800,7 @@
                 FRAME_LEN));
         nSampAdd = WEBRTC_SPL_MIN(nSampAdd, maxStuffSamp);
 
-        WebRtcApm_StuffBuffer(aecm->farendBuf, nSampAdd);
+        WebRtc_MoveReadPtr(aecm->farendBuf, -nSampAdd);
         aecm->delayChange = 1; // the delay needs to be updated
     }
 
diff --git a/src/modules/audio_processing/aecm/main/interface/echo_control_mobile.h b/src/modules/audio_processing/aecm/interface/echo_control_mobile.h
similarity index 80%
rename from src/modules/audio_processing/aecm/main/interface/echo_control_mobile.h
rename to src/modules/audio_processing/aecm/interface/echo_control_mobile.h
index 26b1172..30bea7a 100644
--- a/src/modules/audio_processing/aecm/main/interface/echo_control_mobile.h
+++ b/src/modules/audio_processing/aecm/interface/echo_control_mobile.h
@@ -74,16 +74,14 @@
  * -------------------------------------------------------------------
  * void           *aecmInst     Pointer to the AECM instance
  * WebRtc_Word32  sampFreq      Sampling frequency of data
- * WebRtc_Word32  scSampFreq    Soundcard sampling frequency
  *
  * Outputs                      Description
  * -------------------------------------------------------------------
- * WebRtc_Word32  return          0: OK
+ * WebRtc_Word32  return        0: OK
  *                             -1: error
  */
 WebRtc_Word32 WebRtcAecm_Init(void* aecmInst,
-                              WebRtc_Word32 sampFreq,
-                              WebRtc_Word32 scSampFreq);
+                              WebRtc_Word32 sampFreq);
 
 /*
  * Inserts an 80 or 160 sample block of data into the farend buffer.
@@ -171,6 +169,52 @@
                                     AecmConfig *config);
 
 /*
+ * This function enables the user to set the echo path on-the-fly.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*        aecmInst        Pointer to the AECM instance
+ * void*        echo_path       Pointer to the echo path to be set
+ * size_t       size_bytes      Size in bytes of the echo path
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * WebRtc_Word32  return        0: OK
+ *                             -1: error
+ */
+WebRtc_Word32 WebRtcAecm_InitEchoPath(void* aecmInst,
+                                      const void* echo_path,
+                                      size_t size_bytes);
+
+/*
+ * This function enables the user to get the currently used echo path
+ * on-the-fly
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*        aecmInst        Pointer to the AECM instance
+ * void*        echo_path       Pointer to echo path
+ * size_t       size_bytes      Size in bytes of the echo path
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * WebRtc_Word32  return        0: OK
+ *                             -1: error
+ */
+WebRtc_Word32 WebRtcAecm_GetEchoPath(void* aecmInst,
+                                     void* echo_path,
+                                     size_t size_bytes);
+
+/*
+ * This function enables the user to get the echo path size in bytes
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * size_t       return           : size in bytes
+ */
+size_t WebRtcAecm_echo_path_size_bytes();
+
+/*
  * Gets the last error code.
  *
  * Inputs                       Description
diff --git a/src/modules/audio_processing/aecm/main/matlab/compsup.m b/src/modules/audio_processing/aecm/main/matlab/compsup.m
deleted file mode 100644
index 9575ec4..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/compsup.m
+++ /dev/null
@@ -1,447 +0,0 @@
-function [emicrophone,aaa]=compsup(microphone,TheFarEnd,avtime,samplingfreq);
-% microphone = microphone signal
-% aaa = nonlinearity input variable
-% TheFarEnd = far end signal
-% avtime = interval to compute suppression from (seconds)
-% samplingfreq = sampling frequency
-
-%if(nargin==6)
-%    fprintf(1,'suppress has received a delay sequence\n');
-%end
-
-
-Ap500=[  1.00, -4.95, 9.801, -9.70299, 4.80298005, -0.9509900499];
-Bp500=[  0.662743088639636, -2.5841655608125, 3.77668102146288, -2.45182477425154, 0.596566274575251, 0.0];
-
-
-Ap200=[ 1.00, -4.875, 9.50625, -9.26859375, 4.518439453125, -0.881095693359375];
-Bp200=[ 0.862545460994275, -3.2832804496114, 4.67892032308828, -2.95798023879133, 0.699796870041299, 0.0];
-
-maxDelay=0.4; %[s]
-histLen=1; %[s]
-
-
-% CONSTANTS THAT YOU CAN EXPERIMENT WITH
-A_GAIN=10.0;	 	% for the suppress case
-oversampling = 2;	% must be power of 2; minimum is 2; 4 works
-% fine for support=64, but for support=128,
-% 8 gives better results.
-support=64; %512	% fft support (frequency resolution; at low
-% settings you can hear more distortion
-% (e.g. pitch that is left-over from far-end))
-% 128 works well, 64 is ok)
-
-lowlevel = mean(abs(microphone))*0.0001;
-
-G_ol = 0;  % Use overlapping sets of estimates
-
-% ECHO SUPPRESSION SPECIFIC PARAMETERS
-suppress_overdrive=1.0;   % overdrive factor for suppression 1.4 is good
-gamma_echo=1.0;           % same as suppress_overdrive but at different place
-de_echo_bound=0.0;
-mLim=10;                  % rank of matrix G
-%limBW = 1;		  % use bandwidth-limited response for G
-if mLim > (support/2+1)
-    error('mLim in suppress.m too large\n');
-end
-
-
-dynrange=1.0000e-004;
-
-% other, constants
-hsupport = support/2;
-hsupport1 = hsupport+1;
-factor =  2 / oversampling;
-updatel = support/oversampling;
-win=sqrt(designwindow(0,support));
-estLen = round(avtime * samplingfreq/updatel)
-
-runningfmean =0.0;
-
-mLim = floor(hsupport1/2);
-V = sqrt(2/hsupport1)*cos(pi/hsupport1*(repmat((0:hsupport1-1) + 0.5, mLim, 1).* ...
-    repmat((0:mLim-1)' + 0.5, 1, hsupport1)));
-
-fprintf(1,'updatel is %5.3f s\n', updatel/samplingfreq);
-
-
-
-bandfirst=8; bandlast=25;
-dosmooth=0;  % to get rid of wavy bin counts (can be worse or better)
-
-% compute some constants
-blockLen = support/oversampling;
-maxDelayb = floor(samplingfreq*maxDelay/updatel); % in blocks
-histLenb = floor(samplingfreq*histLen/updatel); % in blocks
-
-x0=TheFarEnd;
-y0=microphone;
-
-
-%input
-tlength=min([length(microphone),length(TheFarEnd)]);
-updateno=floor(tlength/updatel);
-tlength=updatel*updateno;
-updateno = updateno - oversampling + 1;
-
-TheFarEnd =TheFarEnd(1:tlength);
-microphone =microphone(1:tlength);
-
-TheFarEnd =[zeros(hsupport,1);TheFarEnd(1:tlength)];
-microphone =[zeros(hsupport,1);microphone(1:tlength)];
-
-
-% signal length
-n = min([floor(length(x0)/support)*support,floor(length(y0)/support)*support]);
-nb = n/blockLen - oversampling + 1; % in blocks
-
-% initialize space
-win = sqrt([0 ; hanning(support-1)]);
-sxAll2 = zeros(hsupport1,nb);
-syAll2 = zeros(hsupport1,nb);
-
-z500=zeros(5,maxDelayb+1);
-z200=zeros(5,hsupport1);
-
-bxspectrum=uint32(zeros(nb,1));
-bxhist=uint32(zeros(maxDelayb+1,1));
-byspectrum=uint32(zeros(nb,1));
-bcount=zeros(1+maxDelayb,nb);
-fcount=zeros(1+maxDelayb,nb);
-fout=zeros(1+maxDelayb,nb);
-delay=zeros(nb,1);
-tdelay=zeros(nb,1);
-nlgains=zeros(nb,1);
-
-% create space (mainly for debugging)
-emicrophone=zeros(tlength,1);
-femicrophone=complex(zeros(hsupport1,updateno));
-thefilter=zeros(hsupport1,updateno);
-thelimiter=ones(hsupport1,updateno);
-fTheFarEnd=complex(zeros(hsupport1,updateno));
-afTheFarEnd=zeros(hsupport1,updateno);
-fmicrophone=complex(zeros(hsupport1,updateno));
-afmicrophone=zeros(hsupport1,updateno);
-
-G = zeros(hsupport1, hsupport1);
-zerovec = zeros(hsupport1,1);
-zeromat = zeros(hsupport1);
-
-% Reset sums
-mmxs_a = zerovec;
-mmys_a = zerovec;
-s2xs_a = zerovec;
-s2ys_a = zerovec;
-Rxxs_a = zeromat;
-Ryxs_a = zeromat;
-count_a = 1;
-
-mmxs_b = zerovec;
-mmys_b = zerovec;
-s2xs_b = zerovec;
-s2ys_b = zerovec;
-Rxxs_b = zeromat;
-Ryxs_b = zeromat;
-count_b = 1;
-
-nog=0;
-
-aaa=zeros(size(TheFarEnd));
-
-% loop over signal blocks
-fprintf(1,'.. Suppression; averaging G over %5.1f seconds; file length %5.1f seconds ..\n',avtime, length(microphone)/samplingfreq);
-fprintf(1,'.. SUPPRESSING ONLY AFTER %5.1f SECONDS! ..\n',avtime);
-fprintf(1,'.. 20 seconds is good ..\n');
-hh = waitbar_j(0,'Please wait...');
-
-
-for i=1:updateno
-
-    sb = (i-1)*updatel + 1;
-    se=sb+support-1;
-    
-    % analysis FFTs
-    temp=fft(win .* TheFarEnd(sb:se));
-    fTheFarEnd(:,i)=temp(1:hsupport1);
-    xf=fTheFarEnd(:,i);
-    afTheFarEnd(:,i)= abs(fTheFarEnd(:,i));
-    
-    temp=win .* microphone(sb:se);
-    
-    temp=fft(win .* microphone(sb:se));
-    fmicrophone(:,i)=temp(1:hsupport1);
-    yf=fmicrophone(:,i);
-    afmicrophone(:,i)= abs(fmicrophone(:,i));
-
-    
-    ener_orig = afmicrophone(:,i)'*afmicrophone(:,i);
-    if( ener_orig == 0)
-        afmicrophone(:,i)=lowlevel*ones(size(afmicrophone(:,i)));
-    end
-    
-    
-    	% use log domain (showed improved performance)
-xxf= sqrt(real(xf.*conj(xf))+1e-20);
-yyf= sqrt(real(yf.*conj(yf))+1e-20);
-        sxAll2(:,i) = 20*log10(xxf);
-	syAll2(:,i) = 20*log10(yyf);
-
-       mD=min(i-1,maxDelayb);
-      xthreshold = sum(sxAll2(:,i-mD:i),2)/(maxDelayb+1);
-
-      [yout, z200] = filter(Bp200,Ap200,syAll2(:,i),z200,2);
-      yout=yout/(maxDelayb+1);
-      ythreshold = mean(syAll2(:,i-mD:i),2);
-      
-
-  bxspectrum(i)=getBspectrum(sxAll2(:,i),xthreshold,bandfirst,bandlast);
-  byspectrum(i)=getBspectrum(syAll2(:,i),yout,bandfirst,bandlast);
-
-  bxhist(end-mD:end)=bxspectrum(i-mD:i);
-  
-  bcount(:,i)=hisser2( ...
-     byspectrum(i),flipud(bxhist),bandfirst,bandlast);
- 
- 
-  [fout(:,i), z500] = filter(Bp500,Ap500,bcount(:,i),z500,2);
-  fcount(:,i)=sum(bcount(:,max(1,i-histLenb+1):i),2); % using the history range
- fout(:,i)=round(fout(:,i)); 
-  [value,delay(i)]=min(fout(:,i),[],1);
-  tdelay(i)=(delay(i)-1)*support/(samplingfreq*oversampling);
-
-    % compensate
-
-    idel =  max(i - delay(i) + 1,1);
-    
-  
-    % echo suppression
-    
-    noisyspec = afmicrophone(:,i);
-    
-    % Estimate G using covariance matrices
-    
-    % Cumulative estimates    
-    xx = afTheFarEnd(:,idel);
-    yy = afmicrophone(:,i);
-    
-    % Means
-    mmxs_a = mmxs_a + xx;
-    mmys_a = mmys_a + yy;
-    if (G_ol)
-        mmxs_b = mmxs_b + xx;  
-        mmys_b = mmys_b + yy;
-        mmy = mean([mmys_a/count_a mmys_b/count_b],2);
-        mmx = mean([mmxs_a/count_a mmxs_b/count_b],2);
-    else
-        mmx = mmxs_a/count_a;   
-        mmy = mmys_a/count_a;   
-    end
-    count_a = count_a + 1;
-    count_b = count_b + 1;
-    
-    % Mean removal
-    xxm = xx - mmx;
-    yym = yy - mmy;
-    
-    % Variances
-    s2xs_a = s2xs_a +  xxm .* xxm;
-    s2ys_a = s2ys_a +  yym .* yym;
-    s2xs_b = s2xs_b +  xxm .* xxm;
-    s2ys_b = s2ys_b +  yym .* yym;
-    
-    % Correlation matrices  
-    Rxxs_a = Rxxs_a + xxm * xxm';
-    Ryxs_a = Ryxs_a + yym * xxm';
-    Rxxs_b = Rxxs_b + xxm * xxm';
-    Ryxs_b = Ryxs_b + yym * xxm';
-    
-    
-    % Gain matrix A
-    
-    if mod(i, estLen) == 0
-        
-        
-        % Cumulative based estimates
-        Rxxf = Rxxs_a / (estLen - 1);
-        Ryxf = Ryxs_a / (estLen - 1);
-        
-        % Variance normalization
-        s2x2 = s2xs_a / (estLen - 1);
-        s2x2 = sqrt(s2x2);
-       % Sx = diag(max(s2x2,dynrange*max(s2x2)));
-        Sx = diag(s2x2);
-        if (sum(s2x2) > 0)
-          iSx = inv(Sx);
-         else
-                 iSx= Sx + 0.01;
-         end
-             
-        s2y2 = s2ys_a / (estLen - 1);
-        s2y2 = sqrt(s2y2);
-       % Sy = diag(max(s2y2,dynrange*max(s2y2)));
-        Sy = diag(s2y2);
-        iSy = inv(Sy);        
-        rx = iSx * Rxxf * iSx;
-        ryx = iSy * Ryxf * iSx;
-        
-     
-        
-        dbd= 7; % Us less than the full matrix
-        
-        % k x m
-        % Bandlimited structure on G
-        LSEon = 0; % Default is using MMSE
-        if (LSEon)
-            ryx = ryx*rx;
-            rx = rx*rx;
-        end
-        p = dbd-1;
-        gaj = min(min(hsupport1,2*p+1),min([p+(1:hsupport1); hsupport1+p+1-(1:hsupport1)]));
-        cgaj = [0 cumsum(gaj)];
-        
-        G3 = zeros(hsupport1);
-        for kk=1:hsupport1
-            ki = max(0,kk-p-1);
-            if (sum(sum(rx(ki+1:ki+gaj(kk),ki+1:ki+gaj(kk))))>0)
-               G3(kk,ki+1:ki+gaj(kk)) = ryx(kk,ki+1:ki+gaj(kk))/rx(ki+1:ki+gaj(kk),ki+1:ki+gaj(kk));
-           else
-               G3(kk,ki+1:ki+gaj(kk)) = ryx(kk,ki+1:ki+gaj(kk));
-           end
-        end
-        % End Bandlimited structure
-        
-        G = G3;
-        G(abs(G)<0.01)=0;
-        G = suppress_overdrive * Sy * G * iSx;
-        
-        if 1
-            figure(32); mi=2;
-            surf(max(min(G,mi),-mi)); view(2)
-            title('Unscaled Masked Limited-bandwidth G');
-        end
-        pause(0.05);
-        
-        % Reset sums
-        mmxs_a = zerovec;
-        mmys_a = zerovec;
-        s2xs_a = zerovec;
-        s2ys_a = zerovec;
-        Rxxs_a = zeromat;
-        Ryxs_a = zeromat;
-        count_a = 1;
-        
-    end
-    
-    if (G_ol)    
-        % Gain matrix B
-        
-        if ((mod((i-estLen/2), estLen) == 0) & i>estLen)
-            
-            
-            % Cumulative based estimates
-            Rxxf = Rxxs_b / (estLen - 1);
-            Ryxf = Ryxs_b / (estLen - 1);
-            
-            % Variance normalization
-            s2x2 = s2xs_b / (estLen - 1);
-            s2x2 = sqrt(s2x2);
-            Sx = diag(max(s2x2,dynrange*max(s2x2)));
-            iSx = inv(Sx);
-            s2y2 = s2ys_b / (estLen - 1);
-            s2y2 = sqrt(s2y2);
-            Sy = diag(max(s2y2,dynrange*max(s2y2)));
-            iSy = inv(Sy);        
-            rx = iSx * Rxxf * iSx;
-            ryx = iSy * Ryxf * iSx;
-            
-            
-            % Bandlimited structure on G
-            LSEon = 0; % Default is using MMSE
-            if (LSEon)
-                ryx = ryx*rx;
-                rx = rx*rx;
-            end
-            p = dbd-1;
-            gaj = min(min(hsupport1,2*p+1),min([p+(1:hsupport1); hsupport1+p+1-(1:hsupport1)]));
-            cgaj = [0 cumsum(gaj)];
-            
-            G3 = zeros(hsupport1);
-            for kk=1:hsupport1
-                ki = max(0,kk-p-1);
-                G3(kk,ki+1:ki+gaj(kk)) = ryx(kk,ki+1:ki+gaj(kk))/rx(ki+1:ki+gaj(kk),ki+1:ki+gaj(kk));
-            end
-            % End Bandlimited structure
-            
-            G = G3;
-            G(abs(G)<0.01)=0;
-            G = suppress_overdrive * Sy * G * iSx;
-            
-            if 1
-                figure(32); mi=2;
-                surf(max(min(G,mi),-mi)); view(2)
-                title('Unscaled Masked Limited-bandwidth G');
-            end
-            pause(0.05);
-            
-            
-            % Reset sums
-            mmxs_b = zerovec;
-            mmys_b = zerovec;
-            s2xs_b = zerovec;
-            s2ys_b = zerovec;
-            Rxxs_b = zeromat;
-            Ryxs_b = zeromat;
-            count_b = 1;
-            
-        end
-        
-    end
-    
-    FECestimate2 = G*afTheFarEnd(:,idel);
-    
-    % compute Wiener filter and suppressor function
-    thefilter(:,i) = (noisyspec - gamma_echo*FECestimate2) ./ noisyspec;
-    ix0 = find(thefilter(:,i)<de_echo_bound);   % bounding trick 1
-    thefilter(ix0,i) = de_echo_bound;     % bounding trick 2
-    ix0 = find(thefilter(:,i)>1);   % bounding in reasonable range
-    thefilter(ix0,i) = 1;
-    
-    % NONLINEARITY
-    nl_alpha=0.8;    % memory; seems not very critical
-    nlSeverity=0.3;  % nonlinearity severity: 0 does nothing; 1 suppresses all
-    thefmean=mean(thefilter(8:16,i));
-    if (thefmean<1)
-        disp('');
-    end
-    runningfmean = nl_alpha*runningfmean + (1-nl_alpha)*thefmean;
-    aaa(sb+20+1:sb+20+updatel)=10000*runningfmean* ones(updatel,1); % debug
-    slope0=1.0/(1.0-nlSeverity); %
-    thegain = max(0.0,min(1.0,slope0*(runningfmean-nlSeverity)));
-    % END NONLINEARITY
-    thefilter(:,i) = thegain*thefilter(:,i);
-    
-    
-    % Wiener filtering
-    femicrophone(:,i) = fmicrophone(:,i) .* thefilter(:,i);
-    thelimiter(:,i) = (noisyspec - A_GAIN*FECestimate2) ./ noisyspec;
-    index = find(thelimiter(:,i)>1.0);
-    thelimiter(index,i) = 1.0;
-    index = find(thelimiter(:,i)<0.0);
-    thelimiter(index,i) = 0.0;
-    
-    if (rem(i,floor(updateno/20))==0)
-        fprintf(1,'.');
-    end
-    if mod(i,50)==0
-        waitbar_j(i/updateno,hh); 
-    end
-    
-    
-    % reconstruction; first make spectrum odd
-    temp=[femicrophone(:,i);flipud(conj(femicrophone(2:hsupport,i)))];
-    emicrophone(sb:se) = emicrophone(sb:se) + factor * win .* real(ifft(temp));
-
-end
-fprintf(1,'\n');
-
-close(hh);
\ No newline at end of file
diff --git a/src/modules/audio_processing/aecm/main/matlab/getBspectrum.m b/src/modules/audio_processing/aecm/main/matlab/getBspectrum.m
deleted file mode 100644
index a4a533d..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/getBspectrum.m
+++ /dev/null
@@ -1,22 +0,0 @@
-function bspectrum=getBspectrum(ps,threshold,bandfirst,bandlast)
-% function bspectrum=getBspectrum(ps,threshold,bandfirst,bandlast)
-% compute binary spectrum using threshold spectrum as pivot
-% bspectrum = binary spectrum (binary)
-% ps=current power spectrum (float)
-% threshold=threshold spectrum (float)
-% bandfirst = first band considered
-% bandlast = last band considered
-  
-% initialization stuff
-  if( length(ps)<bandlast | bandlast>32 | length(ps)~=length(threshold)) 
-  error('BinDelayEst:spectrum:invalid','Dimensionality error');
-end
-
-% get current binary spectrum
-diff = ps - threshold;
-bspectrum=uint32(0);
-for(i=bandfirst:bandlast)
-  if( diff(i)>0 ) 
-    bspectrum = bitset(bspectrum,i);
-  end
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/hisser2.m b/src/modules/audio_processing/aecm/main/matlab/hisser2.m
deleted file mode 100644
index 5a414f9..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/hisser2.m
+++ /dev/null
@@ -1,21 +0,0 @@
-function  bcount=hisser2(bs,bsr,bandfirst,bandlast)
-% function  bcount=hisser(bspectrum,bandfirst,bandlast)
-% histogram for the binary spectra
-% bcount= array of bit counts 
-% bs=binary spectrum (one int32 number each)  
-% bsr=reference binary spectra (one int32 number each)
-% blockSize = histogram over blocksize blocks
-% bandfirst = first band considered
-% bandlast = last band considered
-
-% weight all delays equally
-maxDelay = length(bsr);
-
-% compute counts (two methods; the first works better and is operational)
-bcount=zeros(maxDelay,1);
-for(i=1:maxDelay)
- % the delay should have low count for low-near&high-far and high-near&low-far
- bcount(i)= sum(bitget(bitxor(bs,bsr(i)),bandfirst:bandlast));  
- % the delay should have low count for low-near&high-far (works less well)
-% bcount(i)= sum(bitget(bitand(bsr(i),bitxor(bs,bsr(i))),bandfirst:bandlast));
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/main2.m b/src/modules/audio_processing/aecm/main/matlab/main2.m
deleted file mode 100644
index 7e24c69..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/main2.m
+++ /dev/null
@@ -1,19 +0,0 @@
-
-fid=fopen('aecfar.pcm'); far=fread(fid,'short'); fclose(fid);
-fid=fopen('aecnear.pcm'); mic=fread(fid,'short'); fclose(fid);
-
-%fid=fopen('QA1far.pcm'); far=fread(fid,'short'); fclose(fid);
-%fid=fopen('QA1near.pcm'); mic=fread(fid,'short'); fclose(fid);
-
-start=0 * 8000+1;
-stop= 30 * 8000;
-microphone=mic(start:stop);
-TheFarEnd=far(start:stop);
-avtime=1;
-
-% 16000 to make it compatible with the C-version
-[emicrophone,tdel]=compsup(microphone,TheFarEnd,avtime,16000); 
-
-spclab(8000,TheFarEnd,microphone,emicrophone);
-
-
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/AECMobile.m b/src/modules/audio_processing/aecm/main/matlab/matlab/AECMobile.m
deleted file mode 100644
index 2d3e686..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/AECMobile.m
+++ /dev/null
@@ -1,269 +0,0 @@
-function [femicrophone, aecmStructNew, enerNear, enerFar] = AECMobile(fmicrophone, afTheFarEnd, setupStruct, aecmStruct)
-global NEARENDFFT;
-global F;
-
-aecmStructNew = aecmStruct;
-
-% Magnitude spectrum of near end signal
-afmicrophone = abs(fmicrophone);
-%afmicrophone = NEARENDFFT(setupStruct.currentBlock,:)'/2^F(setupStruct.currentBlock,end);
-% Near end energy level
-ener_orig = afmicrophone'*afmicrophone;
-if( ener_orig == 0)
-    lowlevel = 0.01;
-    afmicrophone = lowlevel*ones(size(afmicrophone));
-end
-%adiff = max(abs(afmicrophone - afTheFarEnd));
-%if (adiff > 0)
-%    disp([setupStruct.currentBlock adiff])
-%end
-
-% Store the near end energy
-%aecmStructNew.enerNear(setupStruct.currentBlock) = log(afmicrophone'*afmicrophone);
-aecmStructNew.enerNear(setupStruct.currentBlock) = log(sum(afmicrophone));
-% Store the far end energy
-%aecmStructNew.enerFar(setupStruct.currentBlock) = log(afTheFarEnd'*afTheFarEnd);
-aecmStructNew.enerFar(setupStruct.currentBlock) = log(sum(afTheFarEnd));
-
-% Update subbands (We currently use all frequency bins, hence .useSubBand is turned off)
-if aecmStructNew.useSubBand
-    internalIndex = 1;
-    for kk=1:setupStruct.subBandLength+1
-        ySubBand(kk) = mean(afmicrophone(internalIndex:internalIndex+setupStruct.numInBand(kk)-1).^aecmStructNew.bandFactor);
-        xSubBand(kk) = mean(afTheFarEnd(internalIndex:internalIndex+setupStruct.numInBand(kk)-1).^aecmStructNew.bandFactor);
-        internalIndex = internalIndex + setupStruct.numInBand(kk);
-    end
-else
-    ySubBand = afmicrophone.^aecmStructNew.bandFactor;
-    xSubBand = afTheFarEnd.^aecmStructNew.bandFactor;
-end
-
-% Estimated echo energy
-if (aecmStructNew.bandFactor == 1)
-    %aecmStructNew.enerEcho(setupStruct.currentBlock) = log((aecmStructNew.H.*xSubBand)'*(aecmStructNew.H.*xSubBand));
-    %aecmStructNew.enerEchoStored(setupStruct.currentBlock) = log((aecmStructNew.HStored.*xSubBand)'*(aecmStructNew.HStored.*xSubBand));
-    aecmStructNew.enerEcho(setupStruct.currentBlock) = log(sum(aecmStructNew.H.*xSubBand));
-    aecmStructNew.enerEchoStored(setupStruct.currentBlock) = log(sum(aecmStructNew.HStored.*xSubBand));
-elseif (aecmStructNew.bandFactor == 2)
-    aecmStructNew.enerEcho(setupStruct.currentBlock) = log(aecmStructNew.H'*xSubBand);
-    aecmStructNew.enerEchoStored(setupStruct.currentBlock) = log(aecmStructNew.HStored'*xSubBand);
-end
-
-% Last 100 blocks of data, used for plotting
-n100 = max(1,setupStruct.currentBlock-99):setupStruct.currentBlock;
-enerError = aecmStructNew.enerNear(n100)-aecmStructNew.enerEcho(n100);
-enerErrorStored = aecmStructNew.enerNear(n100)-aecmStructNew.enerEchoStored(n100);
-
-% Store the far end sub band. This is needed if we use LSE instead of NLMS
-aecmStructNew.X = [xSubBand aecmStructNew.X(:,1:end-1)];
-
-% Update energy levels, which control the VAD
-if ((aecmStructNew.enerFar(setupStruct.currentBlock) < aecmStructNew.energyMin) & (aecmStructNew.enerFar(setupStruct.currentBlock) >= aecmStruct.FAR_ENERGY_MIN))
-    aecmStructNew.energyMin = aecmStructNew.enerFar(setupStruct.currentBlock);
-    %aecmStructNew.energyMin = max(aecmStructNew.energyMin,12);
-    aecmStructNew.energyMin = max(aecmStructNew.energyMin,aecmStruct.FAR_ENERGY_MIN);
-    aecmStructNew.energyLevel = (aecmStructNew.energyMax-aecmStructNew.energyMin)*aecmStructNew.energyThres+aecmStructNew.energyMin;
-    aecmStructNew.energyLevelMSE = (aecmStructNew.energyMax-aecmStructNew.energyMin)*aecmStructNew.energyThresMSE+aecmStructNew.energyMin;
-end
-if (aecmStructNew.enerFar(setupStruct.currentBlock) > aecmStructNew.energyMax)
-    aecmStructNew.energyMax = aecmStructNew.enerFar(setupStruct.currentBlock);
-    aecmStructNew.energyLevel = (aecmStructNew.energyMax-aecmStructNew.energyMin)*aecmStructNew.energyThres+aecmStructNew.energyMin;
-    aecmStructNew.energyLevelMSE = (aecmStructNew.energyMax-aecmStructNew.energyMin)*aecmStructNew.energyThresMSE+aecmStructNew.energyMin;
-end
-
-% Calculate current energy error in near end (estimated echo vs. near end)
-dE = aecmStructNew.enerNear(setupStruct.currentBlock)-aecmStructNew.enerEcho(setupStruct.currentBlock);
-
-%%%%%%%%
-% Calculate step size used in LMS algorithm, based on current far end energy and near end energy error (dE)
-%%%%%%%%
-if setupStruct.stepSize_flag
-    [mu, aecmStructNew] = calcStepSize(aecmStructNew.enerFar(setupStruct.currentBlock), dE, aecmStructNew, setupStruct.currentBlock, 1);
-else
-    mu = 0.25;
-end
-aecmStructNew.muLog(setupStruct.currentBlock) = mu; % Store the step size
-
-% Estimate Echo Spectral Shape
-[U, aecmStructNew.H] = fallerEstimator(ySubBand,aecmStructNew.X,aecmStructNew.H,mu);
-
-%%%%%
-% Determine if we should store or restore the channel
-%%%%%
-if ((setupStruct.currentBlock <= aecmStructNew.convLength) | (~setupStruct.channelUpdate_flag))
-    aecmStructNew.HStored = aecmStructNew.H; % Store what you have after startup
-elseif ((setupStruct.currentBlock > aecmStructNew.convLength) & (setupStruct.channelUpdate_flag))
-    if ((aecmStructNew.enerFar(setupStruct.currentBlock) < aecmStructNew.energyLevelMSE) & (aecmStructNew.enerFar(setupStruct.currentBlock-1) >= aecmStructNew.energyLevelMSE))
-        xxx = aecmStructNew.countMseH;
-        if (xxx > 20)
-            mseStored = mean(abs(aecmStructNew.enerEchoStored(setupStruct.currentBlock-xxx:setupStruct.currentBlock-1)-aecmStructNew.enerNear(setupStruct.currentBlock-xxx:setupStruct.currentBlock-1)));
-            mseLatest = mean(abs(aecmStructNew.enerEcho(setupStruct.currentBlock-xxx:setupStruct.currentBlock-1)-aecmStructNew.enerNear(setupStruct.currentBlock-xxx:setupStruct.currentBlock-1)));
-            %fprintf('Stored: %4f Latest: %4f\n', mseStored, mseLatest) % Uncomment if you want to display the MSE values
-            if ((mseStored < 0.8*mseLatest) & (aecmStructNew.mseHStoredOld < 0.8*aecmStructNew.mseHLatestOld))
-                aecmStructNew.H = aecmStructNew.HStored;
-                fprintf('Restored H at block %d\n',setupStruct.currentBlock)
-            elseif (((0.8*mseStored > mseLatest) & (mseLatest < aecmStructNew.mseHThreshold) & (aecmStructNew.mseHLatestOld < aecmStructNew.mseHThreshold)) | (mseStored == Inf))
-                aecmStructNew.HStored = aecmStructNew.H;
-                fprintf('Stored new H at block %d\n',setupStruct.currentBlock)
-            end
-            aecmStructNew.mseHStoredOld = mseStored;
-            aecmStructNew.mseHLatestOld = mseLatest;
-        end
-    elseif ((aecmStructNew.enerFar(setupStruct.currentBlock) >= aecmStructNew.energyLevelMSE) & (aecmStructNew.enerFar(setupStruct.currentBlock-1) < aecmStructNew.energyLevelMSE))
-        aecmStructNew.countMseH = 1;
-    elseif (aecmStructNew.enerFar(setupStruct.currentBlock) >= aecmStructNew.energyLevelMSE)
-        aecmStructNew.countMseH = aecmStructNew.countMseH + 1;
-    end
-end
-
-%%%%%
-% Check delay (calculate the delay offset (if we can))
-% The algorithm is not tuned and should be used with care. It runs separately from Bastiaan's algorithm.
-%%%%%
-yyy = 31; % Correlation buffer length (currently unfortunately hard coded)
-dxxx = 25; % Maximum offset (currently unfortunately hard coded)
-if (setupStruct.currentBlock > aecmStructNew.convLength)
-    if (aecmStructNew.enerFar(setupStruct.currentBlock-(yyy+2*dxxx-1):setupStruct.currentBlock) > aecmStructNew.energyLevelMSE)
-        for xxx = -dxxx:dxxx
-            aecmStructNew.delayLatestS(xxx+dxxx+1) = sum(sign(aecmStructNew.enerEcho(setupStruct.currentBlock-(yyy+dxxx-xxx)+1:setupStruct.currentBlock+xxx-dxxx)-mean(aecmStructNew.enerEcho(setupStruct.currentBlock-(yyy++dxxx-xxx)+1:setupStruct.currentBlock+xxx-dxxx))).*sign(aecmStructNew.enerNear(setupStruct.currentBlock-yyy-dxxx+1:setupStruct.currentBlock-dxxx)-mean(aecmStructNew.enerNear(setupStruct.currentBlock-yyy-dxxx+1:setupStruct.currentBlock-dxxx))));
-        end
-        aecmStructNew.newDelayCurve = 1;
-    end
-end
-if ((setupStruct.currentBlock > 2*aecmStructNew.convLength) & ~rem(setupStruct.currentBlock,yyy*2) & aecmStructNew.newDelayCurve)
-    [maxV,maxP] = max(aecmStructNew.delayLatestS);
-    if ((maxP > 2) & (maxP < 2*dxxx))
-        maxVLeft = aecmStructNew.delayLatestS(max(1,maxP-4));
-        maxVRight = aecmStructNew.delayLatestS(min(2*dxxx+1,maxP+4));
-        %fprintf('Max %d, Left %d, Right %d\n',maxV,maxVLeft,maxVRight) % Uncomment if you want to see max value
-        if ((maxV > 24) & (maxVLeft < maxV - 10)  & (maxVRight < maxV - 10))
-            aecmStructNew.feedbackDelay = maxP-dxxx-1;
-            aecmStructNew.newDelayCurve = 0;
-            aecmStructNew.feedbackDelayUpdate = 1;
-            fprintf('Feedback Update at block %d\n',setupStruct.currentBlock)
-        end
-    end
-end
-% End of "Check delay"
-%%%%%%%%
-
-%%%%%
-% Calculate suppression gain, based on far end energy and near end energy error (dE)
-if (setupStruct.supGain_flag)
-    [gamma_echo, aecmStructNew.cntIn, aecmStructNew.cntOut] = calcFilterGain(aecmStructNew.enerFar(setupStruct.currentBlock), dE, aecmStructNew, setupStruct.currentBlock, aecmStructNew.convLength, aecmStructNew.cntIn, aecmStructNew.cntOut);
-else
-    gamma_echo = 1;
-end
-aecmStructNew.gammaLog(setupStruct.currentBlock) = gamma_echo; % Store the gain
-gamma_use = gamma_echo;
-
-% Use the stored channel
-U = aecmStructNew.HStored.*xSubBand;
-
-% compute Wiener filter and suppressor function
-Iy = find(ySubBand);
-subBandFilter = zeros(size(ySubBand));
-if (aecmStructNew.bandFactor == 2)
-    subBandFilter(Iy) = (1 - gamma_use*sqrt(U(Iy)./ySubBand(Iy))); % For Faller
-else
-    subBandFilter(Iy) = (1 - gamma_use*(U(Iy)./ySubBand(Iy))); % For COV
-end
-ix0 = find(subBandFilter < 0);   % bounding trick 1
-subBandFilter(ix0) = 0;
-ix0 = find(subBandFilter > 1);   % bounding trick 1
-subBandFilter(ix0) = 1;
-
-% Interpolate back to normal frequency bins if we use sub bands
-if aecmStructNew.useSubBand
-    thefilter = interp1(setupStruct.centerFreq,subBandFilter,linspace(0,setupStruct.samplingfreq/2,setupStruct.hsupport1)','nearest');
-    testfilter = interp1(setupStruct.centerFreq,subBandFilter,linspace(0,setupStruct.samplingfreq/2,1000),'nearest');
-    thefilter(end) = subBandFilter(end);
-    
-    internalIndex = 1;
-    for kk=1:setupStruct.subBandLength+1
-        internalIndex:internalIndex+setupStruct.numInBand(kk)-1;
-        thefilter(internalIndex:internalIndex+setupStruct.numInBand(kk)-1) = subBandFilter(kk);
-        internalIndex = internalIndex + setupStruct.numInBand(kk);
-    end
-else
-    thefilter = subBandFilter;
-    testfilter = subBandFilter;
-end
-
-% Bound the filter
-ix0 = find(thefilter < setupStruct.de_echo_bound);   % bounding trick 1
-thefilter(ix0) = setupStruct.de_echo_bound;     % bounding trick 2
-ix0 = find(thefilter > 1);   % bounding in reasonable range
-thefilter(ix0) = 1;
-
-%%%%
-% NLP
-%%%%
-thefmean = mean(thefilter(8:16));
-if (thefmean < 1)
-    disp('');
-end
-aecmStructNew.runningfmean = setupStruct.nl_alpha*aecmStructNew.runningfmean + (1-setupStruct.nl_alpha)*thefmean;
-slope0 = 1.0/(1.0 - setupStruct.nlSeverity); %
-thegain = max(0.0, min(1.0, slope0*(aecmStructNew.runningfmean - setupStruct.nlSeverity)));
-if ~setupStruct.nlp_flag
-    thegain = 1;
-end
-% END NONLINEARITY
-thefilter = thegain*thefilter;
-
-%%%%
-% The suppression
-%%%%
-femicrophone = fmicrophone .* thefilter;
-% Store the output energy (used for plotting)
-%aecmStructNew.enerOut(setupStruct.currentBlock) = log(abs(femicrophone)'*abs(femicrophone));
-aecmStructNew.enerOut(setupStruct.currentBlock) = log(sum(abs(femicrophone)));
-
-if aecmStructNew.plotIt
-    figure(13)
-    subplot(311)
-    %plot(n100,enerFar(n100),'b-',n100,enerNear(n100),'k--',n100,enerEcho(n100),'r-',[n100(1) n100(end)],[1 1]*vadThNew,'b:',[n100(1) n100(end)],[1 1]*((energyMax-energyMin)/4+energyMin),'r-.',[n100(1) n100(end)],[1 1]*vadNearThNew,'g:',[n100(1) n100(end)],[1 1]*energyMax,'r-.',[n100(1) n100(end)],[1 1]*energyMin,'r-.','LineWidth',2)
-    plot(n100,aecmStructNew.enerFar(n100),'b-',n100,aecmStructNew.enerNear(n100),'k--',n100,aecmStructNew.enerOut(n100),'r-.',n100,aecmStructNew.enerEcho(n100),'r-',n100,aecmStructNew.enerEchoStored(n100),'c-',[n100(1) n100(end)],[1 1]*((aecmStructNew.energyMax-aecmStructNew.energyMin)/4+aecmStructNew.energyMin),'g-.',[n100(1) n100(end)],[1 1]*aecmStructNew.energyMax,'g-.',[n100(1) n100(end)],[1 1]*aecmStructNew.energyMin,'g-.','LineWidth',2)
-    %title(['Frame ',int2str(i),' av ',int2str(setupStruct.updateno),' State = ',int2str(speechState),' \mu = ',num2str(mu)])
-    title(['\gamma = ',num2str(gamma_echo),' \mu = ',num2str(mu)])
-    subplot(312)
-    %plot(n100,enerError,'b-',[n100(1) n100(end)],[1 1]*vadNearTh,'r:',[n100(1) n100(end)],[-1.5 -1.5]*vadNearTh,'r:','LineWidth',2)
-    %plot(n100,enerError,'b-',[n100(1) n100(end)],[1 1],'r:',[n100(1) n100(end)],[-2 -2],'r:','LineWidth',2)
-    plot(n100,enerError,'b-',n100,enerErrorStored,'c-',[n100(1) n100(end)],[1 1]*aecmStructNew.varMean,'k--',[n100(1) n100(end)],[1 1],'r:',[n100(1) n100(end)],[-2 -2],'r:','LineWidth',2)
-    % Plot mu
-    %plot(n100,log2(aecmStructNew.muLog(n100)),'b-','LineWidth',2)
-    %plot(n100,log2(aecmStructNew.HGain(n100)),'b-',[n100(1) n100(end)],[1 1]*log2(sum(aecmStructNew.HStored)),'r:','LineWidth',2)
-    title(['Block ',int2str(setupStruct.currentBlock),' av ',int2str(setupStruct.updateno)])
-    subplot(313)
-    %plot(n100,enerVar(n100),'b-',[n100(1) n100(end)],[1 1],'r:',[n100(1) n100(end)],[-2 -2],'r:','LineWidth',2)
-    %plot(n100,enerVar(n100),'b-','LineWidth',2)
-    % Plot correlation curve
-
-    %plot(-25:25,aecmStructNew.delayStored/max(aecmStructNew.delayStored),'c-',-25:25,aecmStructNew.delayLatest/max(aecmStructNew.delayLatest),'r-',-25:25,(max(aecmStructNew.delayStoredS)-aecmStructNew.delayStoredS)/(max(aecmStructNew.delayStoredS)-min(aecmStructNew.delayStoredS)),'c:',-25:25,(max(aecmStructNew.delayLatestS)-aecmStructNew.delayLatestS)/(max(aecmStructNew.delayLatestS)-min(aecmStructNew.delayLatestS)),'r:','LineWidth',2)
-    %plot(-25:25,aecmStructNew.delayStored,'c-',-25:25,aecmStructNew.delayLatest,'r-',-25:25,(max(aecmStructNew.delayStoredS)-aecmStructNew.delayStoredS)/(max(aecmStructNew.delayStoredS)-min(aecmStructNew.delayStoredS)),'c:',-25:25,(max(aecmStructNew.delayLatestS)-aecmStructNew.delayLatestS)/(max(aecmStructNew.delayLatestS)-min(aecmStructNew.delayLatestS)),'r:','LineWidth',2)
-    %plot(-25:25,aecmStructNew.delayLatest,'r-',-25:25,(50-aecmStructNew.delayLatestS)/100,'r:','LineWidth',2)
-    plot(-25:25,aecmStructNew.delayLatestS,'r:','LineWidth',2)
-    %plot(-25:25,aecmStructNew.delayStored,'c-',-25:25,aecmStructNew.delayLatest,'r-','LineWidth',2)
-    plot(0:32,aecmStruct.HStored,'bo-','LineWidth',2)
-    %title(['\gamma | In = ',int2str(aecmStructNew.muStruct.countInInterval),' | Out High = ',int2str(aecmStructNew.muStruct.countOutHighInterval),' | Out Low = ',int2str(aecmStructNew.muStruct.countOutLowInterval)])
-    pause(1)
-    %if ((setupStruct.currentBlock == 860) | (setupStruct.currentBlock == 420) | (setupStruct.currentBlock == 960))
-    if 0%(setupStruct.currentBlock == 960)
-        figure(60)
-        plot(n100,aecmStructNew.enerNear(n100),'k--',n100,aecmStructNew.enerEcho(n100),'k:','LineWidth',2)
-        legend('Near End','Estimated Echo')
-        title('Signal Energy witH offset compensation')
-        figure(61)
-        subplot(211)
-        stem(sign(aecmStructNew.enerNear(n100)-mean(aecmStructNew.enerNear(n100))))
-        title('Near End Energy Pattern (around mean value)')
-        subplot(212)
-        stem(sign(aecmStructNew.enerEcho(n100)-mean(aecmStructNew.enerEcho(n100))))
-        title('Estimated Echo Energy Pattern (around mean value)')
-        pause
-    end
-    drawnow%,pause
-elseif ~rem(setupStruct.currentBlock,100)
-    fprintf('Block %d of %d\n',setupStruct.currentBlock,setupStruct.updateno)
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/align.m b/src/modules/audio_processing/aecm/main/matlab/matlab/align.m
deleted file mode 100644
index 9b9c0ba..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/align.m
+++ /dev/null
@@ -1,98 +0,0 @@
-function [delayStructNew] = align(xf, yf, delayStruct, i, trueDelay);
-
-%%%%%%%
-% Bastiaan's algorithm copied
-%%%%%%%
-Ap500 = [1.00, -4.95, 9.801, -9.70299, 4.80298005, -0.9509900499];
-Bp500 = [0.662743088639636, -2.5841655608125, 3.77668102146288, -2.45182477425154, 0.596566274575251, 0.0];
-Ap200 = [1.00, -4.875, 9.50625, -9.26859375, 4.518439453125, -0.881095693359375];
-Bp200 = [0.862545460994275, -3.2832804496114, 4.67892032308828, -2.95798023879133, 0.699796870041299, 0.0];
-
-oldMethod = 1; % Turn on or off the old method. The new one is Bastiaan's August 2008 updates
-THReSHoLD = 2.0; % ADJUSTABLE threshold factor; 4.0 seems good
-%%%%%%%%%%%%%%%%%%%
-% use log domain (showed improved performance)
-xxf = sqrt(real(xf.*conj(xf))+1e-20);
-yyf = sqrt(real(yf.*conj(yf))+1e-20);
-delayStruct.sxAll2(:,i) = 20*log10(xxf);
-delayStruct.syAll2(:,i) = 20*log10(yyf);
-
-mD = min(i-1,delayStruct.maxDelayb);
-if oldMethod
-    factor = 1.0;
-    histLenb = 250;
-    xthreshold = factor*median(delayStruct.sxAll2(:,i-mD:i),2);
-    ythreshold = factor*median(delayStruct.syAll2(:,i-mD:i),2);
-else
-    xthreshold = sum(delayStruct.sxAll2(:,i-mD:i),2)/(delayStruct.maxDelayb+1);
-    
-    [yout, delayStruct.z200] = filter(Bp200, Ap200, delayStruct.syAll2(:,i), delayStruct.z200, 2);
-    yout = yout/(delayStruct.maxDelayb+1);
-    ythreshold = mean(delayStruct.syAll2(:,i-mD:i),2);
-    ythreshold = yout;
-end
-
-delayStruct.bxspectrum(i) = getBspectrum(delayStruct.sxAll2(:,i), xthreshold, delayStruct.bandfirst, delayStruct.bandlast);
-delayStruct.byspectrum(i) = getBspectrum(delayStruct.syAll2(:,i), ythreshold, delayStruct.bandfirst, delayStruct.bandlast);
-
-delayStruct.bxhist(end-mD:end) = delayStruct.bxspectrum(i-mD:i);
-
-delayStruct.bcount(:,i) = hisser2(delayStruct.byspectrum(i), flipud(delayStruct.bxhist), delayStruct.bandfirst, delayStruct.bandlast);
-[delayStruct.fout(:,i), delayStruct.z500] = filter(Bp500, Ap500, delayStruct.bcount(:,i), delayStruct.z500, 2);
-if oldMethod
-    %delayStruct.new(:,i) = sum(delayStruct.bcount(:,max(1,i-histLenb+1):i),2); % using the history range
-    tmpVec = [delayStruct.fout(1,i)*ones(2,1); delayStruct.fout(:,i); delayStruct.fout(end,i)*ones(2,1)]; % using the history range
-    tmpVec = filter(ones(1,5), 1, tmpVec);
-    delayStruct.new(:,i) = tmpVec(5:end);
-    %delayStruct.new(:,i) = delayStruct.fout(:,i); % using the history range
-else
-    [delayStruct.fout(:,i), delayStruct.z500] = filter(Bp500, Ap500, delayStruct.bcount(:,i), delayStruct.z500, 2);
-    % NEW CODE
-    delayStruct.new(:,i) = filter([-1,-2,1,4,1,-2,-1], 1, delayStruct.fout(:,i)); %remv smth component
-    delayStruct.new(1:end-3,i) = delayStruct.new(1+3:end,i);
-    delayStruct.new(1:6,i) = 0.0;
-    delayStruct.new(end-6:end,i) = 0.0;  % ends are no good
-end
-[valuen, tempdelay] = min(delayStruct.new(:,i));  % find minimum
-if oldMethod
-    threshold = valuen + (max(delayStruct.new(:,i)) - valuen)/4;
-    thIndex = find(delayStruct.new(:,i) <= threshold);
-    if (i > 1)
-        delayDiff = abs(delayStruct.delay(i-1)-tempdelay+1);
-        if (delayStruct.oneGoodEstimate & (max(diff(thIndex)) > 1) & (delayDiff < 10))
-            % We consider this minimum to be significant, hence update the delay
-            delayStruct.delay(i) = tempdelay;
-        elseif (~delayStruct.oneGoodEstimate & (max(diff(thIndex)) > 1))
-            delayStruct.delay(i) = tempdelay;
-            if (i > histLenb)
-                delayStruct.oneGoodEstimate = 1;
-            end
-        else
-            delayStruct.delay(i) = delayStruct.delay(i-1);
-        end
-    else
-        delayStruct.delay(i) = tempdelay;
-    end
-else
-    threshold = THReSHoLD*std(delayStruct.new(:,i));   % set updata threshold 
-    if ((-valuen > threshold) | (i < delayStruct.smlength)) % see if you want to update delay
-        delayStruct.delay(i) = tempdelay;
-    else
-        delayStruct.delay(i) = delayStruct.delay(i-1);
-    end
-    % END NEW CODE
-end
-delayStructNew = delayStruct;
-
-% administrative and plotting stuff
-if( 0)
-    figure(10);
-    plot([1:length(delayStructNew.new(:,i))],delayStructNew.new(:,i),trueDelay*[1 1],[min(delayStructNew.new(:,i)),max(delayStructNew.new(:,i))],'r',[1 length(delayStructNew.new(:,i))],threshold*[1 1],'r:', 'LineWidth',2);
-    %plot([1:length(delayStructNew.bcount(:,i))],delayStructNew.bcount(:,i),trueDelay*[1 1],[min(delayStructNew.bcount(:,i)),max(delayStructNew.bcount(:,i))],'r','LineWidth',2);
-    %plot([thedelay,thedelay],[min(fcount(:,i)),max(fcount(:,i))],'r');
-    %title(sprintf('bin count and known delay at time %5.1f s\n',(i-1)*(support/(fs*oversampling))));
-    title(delayStructNew.oneGoodEstimate)
-    xlabel('delay in frames');
-    %hold off;
-    drawnow
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/calcFilterGain.m b/src/modules/audio_processing/aecm/main/matlab/matlab/calcFilterGain.m
deleted file mode 100644
index a09a7f2..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/calcFilterGain.m
+++ /dev/null
@@ -1,88 +0,0 @@
-function [gam, cntIn2, cntOut2] = calcFilterGain(energy, dE, aecmStruct, t, T, cntIn, cntOut)
-
-defaultLevel = 1.2;
-cntIn2 = cntIn;
-cntOut2 = cntOut;
-if (t < T)
-    gam = 1;
-else
-    dE1 = -5;
-    dE2 = 1;
-    gamMid = 0.2;
-    gam = max(0,min((energy - aecmStruct.energyMin)/(aecmStruct.energyLevel - aecmStruct.energyMin), 1-(1-gamMid)*(aecmStruct.energyMax-energy)/(aecmStruct.energyMax-aecmStruct.energyLevel)));
-    
-    dEOffset = -0.5;
-    dEWidth = 1.5;
-    %gam2 = max(1,2-((dE-dEOffset)/(dE2-dEOffset)).^2);
-    gam2 = 1+(abs(dE-dEOffset)<(dE2-dEOffset));
-    
-    gam = gam*gam2;
-    
-    
-    if (energy < aecmStruct.energyLevel)
-        gam = 0;
-    else
-        gam = defaultLevel;
-    end
-    dEVec = aecmStruct.enerNear(t-63:t)-aecmStruct.enerEcho(t-63:t);
-    %dEVec = aecmStruct.enerNear(t-20:t)-aecmStruct.enerEcho(t-20:t);
-    numCross = 0;
-    currentState = 0;
-    for ii=1:64
-        if (currentState == 0)
-            currentState = (dEVec(ii) > dE2) - (dEVec(ii) < -2);
-        elseif ((currentState == 1) & (dEVec(ii) < -2))
-            numCross = numCross + 1;
-            currentState = -1;
-        elseif ((currentState == -1) & (dEVec(ii) > dE2))
-            numCross = numCross + 1;
-            currentState = 1;
-        end
-    end
-    gam = max(0, gam - numCross/25);
-    gam = 1;
-    
-    ener_A = 1;
-    ener_B = 0.8;
-    ener_C = aecmStruct.energyLevel + (aecmStruct.energyMax-aecmStruct.energyLevel)/5;
-    dE_A = 4;%2;
-    dE_B = 3.6;%1.8;
-    dE_C = 0.9*dEWidth;
-    dE_D = 1;
-    timeFactorLength = 10;
-    ddE = abs(dE-dEOffset);
-    if (energy < aecmStruct.energyLevel)
-        gam = 0;
-    else
-        gam = 1;
-        gam2 = max(0, min(ener_B*(energy-aecmStruct.energyLevel)/(ener_C-aecmStruct.energyLevel), ener_B+(ener_A-ener_B)*(energy-ener_C)/(aecmStruct.energyMax-ener_C)));
-        if (ddE < dEWidth)
-            % Update counters
-            cntIn2 = cntIn2 + 1;
-            if (cntIn2 > 2)
-                cntOut2 = 0;
-            end
-            gam3 = max(dE_D, min(dE_A-(dE_A-dE_B)*(ddE/dE_C), dE_D+(dE_B-dE_D)*(dEWidth-ddE)/(dEWidth-dE_C)));
-            gam3 = dE_A;
-        else
-            % Update counters
-            cntOut2 = cntOut2 + 1;
-            if (cntOut2 > 2)
-                cntIn2 = 0;
-            end
-            %gam2 = 1;
-            gam3 = dE_D;
-        end
-        timeFactor = min(1, cntIn2/timeFactorLength);
-        gam = gam*(1-timeFactor) + timeFactor*gam2*gam3;
-    end
-    %gam = gam/floor(numCross/2+1);
-end
-if isempty(gam)
-    numCross
-    timeFactor
-    cntIn2
-    cntOut2
-    gam2
-    gam3
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/calcStepSize.m b/src/modules/audio_processing/aecm/main/matlab/matlab/calcStepSize.m
deleted file mode 100644
index ae1365f..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/calcStepSize.m
+++ /dev/null
@@ -1,105 +0,0 @@
-function [mu, aecmStructNew] = calcStepSize(energy, dE, aecmStruct, t, logscale)
-
-if (nargin < 4)
-    t = 1;
-    logscale = 1;
-elseif (nargin == 4)
-    logscale = 1;
-end
-T = aecmStruct.convLength;
-
-if logscale
-    currentMuMax = aecmStruct.MU_MIN + (aecmStruct.MU_MAX-aecmStruct.MU_MIN)*min(t,T)/T;
-    if (aecmStruct.energyMin >= aecmStruct.energyMax)
-        mu = aecmStruct.MU_MIN;
-    else
-        mu = (energy - aecmStruct.energyMin)/(aecmStruct.energyMax - aecmStruct.energyMin)*(currentMuMax-aecmStruct.MU_MIN) + aecmStruct.MU_MIN;
-    end
-    mu = 2^mu;
-    if (energy < aecmStruct.energyLevel)
-        mu = 0;
-    end
-else
-    muMin = 0;
-    muMax = 0.5;
-    currentMuMax = muMin + (muMax-muMin)*min(t,T)/T;
-    if (aecmStruct.energyMin >= aecmStruct.energyMax)
-        mu = muMin;
-    else
-        mu = (energy - aecmStruct.energyMin)/(aecmStruct.energyMax - aecmStruct.energyMin)*(currentMuMax-muMin) + muMin;
-    end
-end
-dE2 = 1;
-dEOffset = -0.5;
-offBoost = 5;
-if (mu > 0)
-    if (abs(dE-aecmStruct.ENERGY_DEV_OFFSET) > aecmStruct.ENERGY_DEV_TOL)
-        aecmStruct.muStruct.countInInterval = 0;
-    else
-        aecmStruct.muStruct.countInInterval = aecmStruct.muStruct.countInInterval + 1;
-    end
-    if (dE < aecmStruct.ENERGY_DEV_OFFSET - aecmStruct.ENERGY_DEV_TOL)
-        aecmStruct.muStruct.countOutLowInterval = aecmStruct.muStruct.countOutLowInterval + 1;
-    else
-        aecmStruct.muStruct.countOutLowInterval = 0;
-    end
-    if (dE > aecmStruct.ENERGY_DEV_OFFSET + aecmStruct.ENERGY_DEV_TOL)
-        aecmStruct.muStruct.countOutHighInterval = aecmStruct.muStruct.countOutHighInterval + 1;
-    else
-        aecmStruct.muStruct.countOutHighInterval = 0;
-    end
-end
-muVar = 2^min(-3,5/50*aecmStruct.muStruct.countInInterval-3);
-muOff = 2^max(offBoost,min(0,offBoost*(aecmStruct.muStruct.countOutLowInterval-aecmStruct.muStruct.minOutLowInterval)/(aecmStruct.muStruct.maxOutLowInterval-aecmStruct.muStruct.minOutLowInterval)));
-
-muLow = 1/64;
-muVar = 1;
-if (t < 2*T)
-    muDT = 1;
-    muVar = 1;
-    mdEVec = 0;
-    numCross = 0;
-else
-    muDT = min(1,max(muLow,1-(1-muLow)*(dE-aecmStruct.ENERGY_DEV_OFFSET)/aecmStruct.ENERGY_DEV_TOL));
-    dEVec = aecmStruct.enerNear(t-63:t)-aecmStruct.enerEcho(t-63:t);
-    %dEVec = aecmStruct.enerNear(t-20:t)-aecmStruct.enerEcho(t-20:t);
-    numCross = 0;
-    currentState = 0;
-    for ii=1:64
-        if (currentState == 0)
-            currentState = (dEVec(ii) > dE2) - (dEVec(ii) < -2);
-        elseif ((currentState == 1) & (dEVec(ii) < -2))
-            numCross = numCross + 1;
-            currentState = -1;
-        elseif ((currentState == -1) & (dEVec(ii) > dE2))
-            numCross = numCross + 1;
-            currentState = 1;
-        end
-    end
-            
-    %logicDEVec = (dEVec > dE2) - (dEVec < -2);
-    %numCross = sum(abs(diff(logicDEVec)));
-    %mdEVec = mean(abs(dEVec-dEOffset));
-    %mdEVec = mean(abs(dEVec-mean(dEVec)));
-    %mdEVec = max(dEVec)-min(dEVec);
-    %if (mdEVec > 4)%1.5)
-    %    muVar = 0;
-    %end
-    muVar = 2^(-floor(numCross/2));
-    muVar = 2^(-numCross);
-end
-%muVar = 1;
-
-
-% if (eStd > (dE2-dEOffset))
-%     muVar = 1/8;
-% else
-%     muVar = 1;
-% end
-
-%mu = mu*muDT*muVar*muOff;
-mu = mu*muDT*muVar;
-mu = min(mu,0.25);
-aecmStructNew = aecmStruct;
-%aecmStructNew.varMean = mdEVec;
-aecmStructNew.varMean = numCross;
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/fallerEstimator.m b/src/modules/audio_processing/aecm/main/matlab/matlab/fallerEstimator.m
deleted file mode 100644
index d038b51..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/fallerEstimator.m
+++ /dev/null
@@ -1,42 +0,0 @@
-function [U, Hnew] = fallerEstimator(Y, X, H, mu)
-
-% Near end signal is stacked frame by frame columnwise in matrix Y and far end in X
-%
-% Possible estimation procedures are
-% 1) LSE
-% 2) NLMS
-% 3) Separated numerator and denomerator filters
-regParam = 1;
-[numFreqs, numFrames] = size(Y);
-[numFreqs, Q] = size(X);
-U = zeros(numFreqs, 1);
-
-if ((nargin == 3) | (nargin == 5))
-    dtd = 0;
-end
-if (nargin == 4)
-    dtd = H;
-end
-Emax = 7;
-dEH = Emax-sum(sum(H));
-nu = 2*mu;
-% if (nargin < 5)
-%     H = zeros(numFreqs, Q);
-%     for kk = 1:numFreqs
-%         Xmatrix = hankel(X(kk,1:Q),X(kk,Q:end));
-%         y = Y(kk,1:end-Q+1)';
-%         H(kk,:) = (y'*Xmatrix')*inv(Xmatrix*Xmatrix'+regParam);
-%         U(kk,1) = H(kk,:)*Xmatrix(:,1);
-%     end
-% else
-    for kk = 1:numFreqs
-        x = X(kk,1:Q)';
-        y = Y(kk,1);
-        Htmp = mu*(y-H(kk,:)*x)/(x'*x+regParam)*x;
-        %Htmp = (mu*(y-H(kk,:)*x)/(x'*x+regParam) - nu/dEH)*x;
-        H(kk,:) = H(kk,:) + Htmp';
-        U(kk,1) = H(kk,:)*x;
-    end
-% end
-
-Hnew = H;
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/getBspectrum.m b/src/modules/audio_processing/aecm/main/matlab/matlab/getBspectrum.m
deleted file mode 100644
index a4a533d..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/getBspectrum.m
+++ /dev/null
@@ -1,22 +0,0 @@
-function bspectrum=getBspectrum(ps,threshold,bandfirst,bandlast)
-% function bspectrum=getBspectrum(ps,threshold,bandfirst,bandlast)
-% compute binary spectrum using threshold spectrum as pivot
-% bspectrum = binary spectrum (binary)
-% ps=current power spectrum (float)
-% threshold=threshold spectrum (float)
-% bandfirst = first band considered
-% bandlast = last band considered
-  
-% initialization stuff
-  if( length(ps)<bandlast | bandlast>32 | length(ps)~=length(threshold)) 
-  error('BinDelayEst:spectrum:invalid','Dimensionality error');
-end
-
-% get current binary spectrum
-diff = ps - threshold;
-bspectrum=uint32(0);
-for(i=bandfirst:bandlast)
-  if( diff(i)>0 ) 
-    bspectrum = bitset(bspectrum,i);
-  end
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/hisser2.m b/src/modules/audio_processing/aecm/main/matlab/matlab/hisser2.m
deleted file mode 100644
index 5a414f9..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/hisser2.m
+++ /dev/null
@@ -1,21 +0,0 @@
-function  bcount=hisser2(bs,bsr,bandfirst,bandlast)
-% function  bcount=hisser(bspectrum,bandfirst,bandlast)
-% histogram for the binary spectra
-% bcount= array of bit counts 
-% bs=binary spectrum (one int32 number each)  
-% bsr=reference binary spectra (one int32 number each)
-% blockSize = histogram over blocksize blocks
-% bandfirst = first band considered
-% bandlast = last band considered
-
-% weight all delays equally
-maxDelay = length(bsr);
-
-% compute counts (two methods; the first works better and is operational)
-bcount=zeros(maxDelay,1);
-for(i=1:maxDelay)
- % the delay should have low count for low-near&high-far and high-near&low-far
- bcount(i)= sum(bitget(bitxor(bs,bsr(i)),bandfirst:bandlast));  
- % the delay should have low count for low-near&high-far (works less well)
-% bcount(i)= sum(bitget(bitand(bsr(i),bitxor(bs,bsr(i))),bandfirst:bandlast));
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/mainProgram.m b/src/modules/audio_processing/aecm/main/matlab/matlab/mainProgram.m
deleted file mode 100644
index eeb2aaa..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/mainProgram.m
+++ /dev/null
@@ -1,283 +0,0 @@
-useHTC = 1; % Set this if you want to run a single file and set file names below. Otherwise use simEnvironment to run from several scenarios in a row
-delayCompensation_flag = 0; % Set this flag to one if you want to turn on the delay compensation/enhancement
-global FARENDFFT;
-global NEARENDFFT;
-global F;
-
-if useHTC
-%    fid=fopen('./htcTouchHd/nb/aecFar.pcm'); xFar=fread(fid,'short'); fclose(fid);
-%    fid=fopen('./htcTouchHd/nb/aecNear.pcm'); yNear=fread(fid,'short'); fclose(fid);
-%    fid=fopen('./samsungBlackjack/nb/aecFar.pcm'); xFar=fread(fid,'short'); fclose(fid);
-%    fid=fopen('./samsungBlackjack/nb/aecNear.pcm'); yNear=fread(fid,'short'); fclose(fid);
-%     fid=fopen('aecFarPoor.pcm'); xFar=fread(fid,'short'); fclose(fid);
-%     fid=fopen('aecNearPoor.pcm'); yNear=fread(fid,'short'); fclose(fid);
-%     fid=fopen('out_aes.pcm'); outAES=fread(fid,'short'); fclose(fid);
-   fid=fopen('aecFar4.pcm'); xFar=fread(fid,'short'); fclose(fid);
-   fid=fopen('aecNear4.pcm'); yNear=fread(fid,'short'); fclose(fid);
-    yNearSpeech = zeros(size(xFar));
-     fs = 8000;
-     frameSize = 64;
-%     frameSize = 128;
-     fs = 16000;
-%     frameSize = 256;
-%F = load('fftValues.txt');
-%FARENDFFT = F(:,1:33);
-%NEARENDFFT = F(:,34:66);
-
-else
-    loadFileFar = [speakerType, '_s_',scenario,'_far_b.wav'];
-    [xFar,fs,nbits] = wavread(loadFileFar);
-    xFar = xFar*2^(nbits-1);
-    loadFileNear = [speakerType, '_s_',scenario,'_near_b.wav'];
-    [yNear,fs,nbits] = wavread(loadFileNear);
-    yNear = yNear*2^(nbits-1);
-    loadFileNearSpeech = [speakerType, '_s_',scenario,'_nearSpeech_b.wav'];
-    [yNearSpeech,fs,nbits] = wavread(loadFileNearSpeech);
-    yNearSpeech = yNearSpeech*2^(nbits-1);
-    frameSize = 256;
-end
-
-dtRegions = [];
-
-% General settings for the AECM
-setupStruct = struct(...
-    'stepSize_flag', 1,...      % This flag turns on the step size calculation. If turned off, mu = 0.25.
-    'supGain_flag', 0,...       % This flag turns on the suppression gain calculation. If turned off, gam = 1.
-    'channelUpdate_flag', 0,... % This flag turns on the channel update. If turned off, H is updated for convLength and then kept constant.
-    'nlp_flag', 0,...           % Turn on/off NLP
-    'withVAD_flag', 0,...           % Turn on/off NLP
-    'useSubBand', 0,...         % Set to 1 if to use subBands
-    'useDelayEstimation', 1,... % Set to 1 if to use delay estimation
-    'support', frameSize,...    % # of samples per frame
-    'samplingfreq',fs,...       % Sampling frequency
-    'oversampling', 2,...       % Overlap between blocks/frames
-    'updatel', 0,...            % # of samples between blocks
-    'hsupport1', 0,...          % # of bins in frequency domain
-    'factor', 0,...             % synthesis window amplification
-    'tlength', 0,...            % # of samples of entire file
-    'updateno', 0,...           % # of updates
-    'nb', 1,...                 % # of blocks
-    'currentBlock', 0,...       %
-    'win', zeros(frameSize,1),...% Window to apply for fft and synthesis
-    'avtime', 1,...             % Time (in sec.) to perform averaging
-    'estLen', 0,...             % Averaging in # of blocks
-    'A_GAIN', 10.0,...          % 
-    'suppress_overdrive', 1.0,...   % overdrive factor for suppression 1.4 is good
-    'gamma_echo', 1.0,...       % same as suppress_overdrive but at different place
-    'de_echo_bound', 0.0,...    %
-    'nl_alpha', 0.4,...         % memory; seems not very critical
-    'nlSeverity', 0.2,...         % nonlinearity severity: 0 does nothing; 1 suppresses all
-    'numInBand', [],...         % # of frequency bins in resp. subBand
-    'centerFreq', [],...        % Center frequency of resp. subBand
-    'dtRegions', dtRegions,...  % Regions where we have DT
-    'subBandLength', frameSize/2);%All bins
-    %'subBandLength', 11);       %Something's wrong when subBandLength even
-    %'nl_alpha', 0.8,...         % memory; seems not very critical
-
-delayStruct = struct(...
-    'bandfirst', 8,...
-    'bandlast', 25,...
-    'smlength', 600,...
-    'maxDelay', 0.4,...
-    'oneGoodEstimate', 0,...
-    'delayAdjust', 0,...
-    'maxDelayb', 0);
-% More parameters in delayStruct are constructed in "updateSettings" below
-
-% Make struct settings
-[setupStruct, delayStruct] = updateSettings(yNear, xFar, setupStruct, delayStruct);
-setupStruct.numInBand = ones(setupStruct.hsupport1,1);
-
-Q = 1; % Time diversity in channel
-% General settings for the step size calculation
-muStruct = struct(...
-    'countInInterval', 0,...
-    'countOutHighInterval', 0,...
-    'countOutLowInterval', 0,...
-    'minInInterval', 50,...
-    'minOutHighInterval', 10,...
-    'minOutLowInterval', 10,...
-    'maxOutLowInterval', 50);
-% General settings for the AECM
-aecmStruct = struct(...
-    'plotIt', 0,... % Set to 0 to turn off plotting
-    'useSubBand', 0,...
-    'bandFactor', 1,...
-    'H', zeros(setupStruct.subBandLength+1,Q),...
-    'HStored', zeros(setupStruct.subBandLength+1,Q),...
-    'X', zeros(setupStruct.subBandLength+1,Q),...
-    'energyThres', 0.28,...
-    'energyThresMSE', 0.4,...
-    'energyMin', inf,...
-    'energyMax', -inf,...
-    'energyLevel', 0,...
-    'energyLevelMSE', 0,...
-    'convLength', 100,...
-    'gammaLog', ones(setupStruct.updateno,1),...
-    'muLog', ones(setupStruct.updateno,1),...
-    'enerFar', zeros(setupStruct.updateno,1),...
-    'enerNear', zeros(setupStruct.updateno,1),...
-    'enerEcho', zeros(setupStruct.updateno,1),...
-    'enerEchoStored', zeros(setupStruct.updateno,1),...
-    'enerOut', zeros(setupStruct.updateno,1),...
-    'runningfmean', 0,...
-    'muStruct', muStruct,...
-    'varMean', 0,...
-    'countMseH', 0,...
-    'mseHThreshold', 1.1,...
-    'mseHStoredOld', inf,...
-    'mseHLatestOld', inf,...
-    'delayLatestS', zeros(1,51),...
-    'feedbackDelay', 0,...
-    'feedbackDelayUpdate', 0,...
-    'cntIn', 0,...
-    'cntOut', 0,...
-    'FAR_ENERGY_MIN', 1,...
-    'ENERGY_DEV_OFFSET', 0.5,...
-    'ENERGY_DEV_TOL', 1.5,...
-    'MU_MIN', -16,...
-    'MU_MAX', -2,...
-    'newDelayCurve', 0);
-
-% Adjust speech signals
-xFar = [zeros(setupStruct.hsupport1-1,1);xFar(1:setupStruct.tlength)];
-yNear = [zeros(setupStruct.hsupport1-1,1);yNear(1:setupStruct.tlength)];
-yNearSpeech = [zeros(setupStruct.hsupport1-1,1);yNearSpeech(1:setupStruct.tlength)];
-xFar = xFar(1:setupStruct.tlength);
-yNear = yNear(1:setupStruct.tlength);
-
-% Set figure settings
-if aecmStruct.plotIt
-    figure(13)
-    set(gcf,'doublebuffer','on')
-end
-%%%%%%%%%%
-% Here starts the algorithm
-% Dividing into frames and then estimating the near end speech
-%%%%%%%%%%
-fTheFarEnd      = complex(zeros(setupStruct.hsupport1,1));
-afTheFarEnd     = zeros(setupStruct.hsupport1,setupStruct.updateno+1);
-fFar            = zeros(setupStruct.hsupport1,setupStruct.updateno+1);
-fmicrophone     = complex(zeros(setupStruct.hsupport1,1));
-afmicrophone    = zeros(setupStruct.hsupport1,setupStruct.updateno+1);
-fNear           = zeros(setupStruct.hsupport1,setupStruct.updateno+1);
-femicrophone    = complex(zeros(setupStruct.hsupport1,1));
-emicrophone     = zeros(setupStruct.tlength,1);
-
-if (setupStruct.useDelayEstimation == 2)
-    delSamples = [1641 1895 2032 1895 2311 2000 2350 2222 NaN 2332 2330 2290 2401 2415 NaN 2393 2305 2381 2398];
-    delBlocks = round(delSamples/setupStruct.updatel);
-    delStarts = floor([25138 46844 105991 169901 195739 218536 241803 333905 347703 362660 373753 745135 765887 788078 806257 823835 842443 860139 881869]/setupStruct.updatel);
-else
-    delStarts = [];
-end
-
-for i=1:setupStruct.updateno
-    setupStruct.currentBlock = i;
-    
-    sb = (i-1)*setupStruct.updatel + 1;
-    se = sb + setupStruct.support - 1;
-    
-    %%%%%%%
-    % Analysis FFTs
-    %%%%%%%
-    % Far end signal
-    temp = fft(setupStruct.win .* xFar(sb:se))/frameSize;
-    fTheFarEnd = temp(1:setupStruct.hsupport1);
-    afTheFarEnd(:,i) = abs(fTheFarEnd);
-    fFar(:,i) = fTheFarEnd;
-    % Near end signal
-    temp = fft(setupStruct.win .* yNear(sb:se))/frameSize;%,pause
-    fmicrophone = temp(1:setupStruct.hsupport1);
-    afmicrophone(:,i) = abs(fmicrophone);
-    fNear(:,i) = fmicrophone;
-    %abs(fmicrophone),pause
-    % The true near end speaker (if we have such info)
-    temp = fft(setupStruct.win .* yNearSpeech(sb:se));
-    aftrueSpeech = abs(temp(1:setupStruct.hsupport1));
-    
-    if(i == 1000)
-        %break;
-    end
-    
-    % Perform delay estimation
-    if (setupStruct.useDelayEstimation == 1)
-        % Delay Estimation
-        delayStruct = align(fTheFarEnd, fmicrophone, delayStruct, i);
-        %delayStruct.delay(i) = 39;%19;
-        idel =  max(i - delayStruct.delay(i) + 1,1);
-        
-        if delayCompensation_flag
-            % If we have a new delay estimate from Bastiaan's alg. update the offset
-            if (delayStruct.delay(i) ~= delayStruct.delay(max(1,i-1)))
-                delayStruct.delayAdjust = delayStruct.delayAdjust + delayStruct.delay(i) - delayStruct.delay(i-1);
-            end
-            % Store the compensated delay
-            delayStruct.delayNew(i) = delayStruct.delay(i) - delayStruct.delayAdjust;
-            if (delayStruct.delayNew(i) < 1)
-                % Something's wrong
-                pause,break
-            end
-            % Compensate with the offset estimate
-            idel = idel + delayStruct.delayAdjust;
-        end
-        if 0%aecmStruct.plotIt
-            figure(1)
-            plot(1:i,delayStruct.delay(1:i),'k:',1:i,delayStruct.delayNew(1:i),'k--','LineWidth',2),drawnow
-        end
-    elseif (setupStruct.useDelayEstimation == 2)
-        % Use "manual delay"
-        delIndex = find(delStarts<i);
-        if isempty(delIndex)
-            idel = i;
-        else
-            idel = i - delBlocks(max(delIndex));
-            if isnan(idel)
-                idel = i - delBlocks(max(delIndex)-1);
-            end
-        end
-    else
-        % No delay estimation
-        %idel = max(i - 18, 1);
-        idel = max(i - 50, 1);
-    end
-
-    %%%%%%%%
-    % This is the AECM algorithm
-    %
-    % Output is the new frequency domain signal (hopefully) echo compensated
-    %%%%%%%%
-    [femicrophone, aecmStruct] = AECMobile(fmicrophone, afTheFarEnd(:,idel), setupStruct, aecmStruct);
-    %[femicrophone, aecmStruct] = AECMobile(fmicrophone, FARENDFFT(idel,:)'/2^F(idel,end-1), setupStruct, aecmStruct);
-    
-    if aecmStruct.feedbackDelayUpdate
-        % If the feedback tells us there is a new offset out there update the enhancement
-        delayStruct.delayAdjust = delayStruct.delayAdjust + aecmStruct.feedbackDelay;
-        aecmStruct.feedbackDelayUpdate = 0;
-    end
-    
-    % reconstruction; first make spectrum odd
-    temp = [femicrophone; flipud(conj(femicrophone(2:(setupStruct.hsupport1-1))))];
-    emicrophone(sb:se) = emicrophone(sb:se) + setupStruct.factor * setupStruct.win .* real(ifft(temp))*frameSize;
-    if max(isnan(emicrophone(sb:se)))
-        % Something's wrong with the output at block i
-        i
-        break
-    end
-end
-
-
-if useHTC
-    fid=fopen('aecOutMatlabC.pcm','w');fwrite(fid,int16(emicrophone),'short');fclose(fid);
-    %fid=fopen('farendFFT.txt','w');fwrite(fid,int16(afTheFarEnd(:)),'short');fclose(fid);
-    %fid=fopen('farendFFTreal.txt','w');fwrite(fid,int16(imag(fFar(:))),'short');fclose(fid);
-    %fid=fopen('farendFFTimag.txt','w');fwrite(fid,int16(real(fFar(:))),'short');fclose(fid);
-    %fid=fopen('nearendFFT.txt','w');fwrite(fid,int16(afmicrophone(:)),'short');fclose(fid);
-    %fid=fopen('nearendFFTreal.txt','w');fwrite(fid,int16(real(fNear(:))),'short');fclose(fid);
-    %fid=fopen('nearendFFTimag.txt','w');fwrite(fid,int16(imag(fNear(:))),'short');fclose(fid);
-end
-if useHTC
-    %spclab(setupStruct.samplingfreq,xFar,yNear,emicrophone)
-else
-    spclab(setupStruct.samplingfreq,xFar,yNear,emicrophone,yNearSpeech)
-end    
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/simEnvironment.m b/src/modules/audio_processing/aecm/main/matlab/matlab/simEnvironment.m
deleted file mode 100644
index 3ebe701..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/simEnvironment.m
+++ /dev/null
@@ -1,15 +0,0 @@
-speakerType = 'fm';
-%for k=2:5
-%for k=[2 4 5]
-for k=3
-    scenario = int2str(k);
-    fprintf('Current scenario: %d\n',k)
-    mainProgram
-    %saveFile = [speakerType, '_s_',scenario,'_delayEst_v2_vad_man.wav'];
-    %wavwrite(emic,fs,nbits,saveFile);
-    %saveFile = ['P:\Engineering_share\BjornV\AECM\',speakerType, '_s_',scenario,'_delayEst_v2_vad_man.pcm'];
-    %saveFile = [speakerType, '_s_',scenario,'_adaptMu_adaptGamma_withVar_gammFilt_HSt.pcm'];
-    saveFile = ['scenario_',scenario,'_090417_backupH_nlp.pcm'];
-    fid=fopen(saveFile,'w');fwrite(fid,int16(emicrophone),'short');fclose(fid);
-    %pause
-end
diff --git a/src/modules/audio_processing/aecm/main/matlab/matlab/updateSettings.m b/src/modules/audio_processing/aecm/main/matlab/matlab/updateSettings.m
deleted file mode 100644
index c805f1d..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/matlab/updateSettings.m
+++ /dev/null
@@ -1,94 +0,0 @@
-function [setupStructNew, delayStructNew] = updateSettings(microphone, TheFarEnd, setupStruct, delayStruct);
-
-% other, constants
-setupStruct.hsupport1 = setupStruct.support/2 + 1;
-setupStruct.factor =  2 / setupStruct.oversampling;
-setupStruct.updatel = setupStruct.support/setupStruct.oversampling;
-setupStruct.estLen = round(setupStruct.avtime * setupStruct.samplingfreq/setupStruct.updatel);
-
-% compute some constants
-blockLen = setupStruct.support/setupStruct.oversampling;
-delayStruct.maxDelayb = floor(setupStruct.samplingfreq*delayStruct.maxDelay/setupStruct.updatel); % in blocks
-
-%input
-tlength = min([length(microphone),length(TheFarEnd)]);
-updateno = floor(tlength/setupStruct.updatel);
-setupStruct.tlength = setupStruct.updatel*updateno;
-setupStruct.updateno = updateno - setupStruct.oversampling + 1;
-
-% signal length
-n = floor(min([length(TheFarEnd), length(microphone)])/setupStruct.support)*setupStruct.support;
-setupStruct.nb = n/blockLen - setupStruct.oversampling + 1; % in blocks
-
-setupStruct.win = sqrt([0 ; hanning(setupStruct.support-1)]);
-
-% Construct filterbank in Bark-scale
-
-K = setupStruct.subBandLength; %Something's wrong when K even
-erbs = 21.4*log10(0.00437*setupStruct.samplingfreq/2+1);
-fe = (10.^((0:K)'*erbs/K/21.4)-1)/0.00437;
-setupStruct.centerFreq = fe;
-H = diag(ones(1,K-1))+diag(ones(1,K-2),-1);
-Hinv = inv(H);
-aty = 2*Hinv(end,:)*fe(2:end-1);
-boundary = aty - (setupStruct.samplingfreq/2 + fe(end-1))/2;
-if rem(K,2)
-    x1 = min([fe(2)/2, -boundary]);
-else
-    x1 = max([0, boundary]);
-end
-%x1
-g = fe(2:end-1);
-g(1) = g(1) - x1/2;
-x = 2*Hinv*g;
-x = [x1;x];
-%figure(42), clf
-xy = zeros((K+1)*4,1);
-yy = zeros((K+1)*4,1);
-xy(1:4) = [fe(1) fe(1) x(1) x(1)]';
-yy(1:4) = [0 1 1 0]'/x(1);
-for kk=2:K
-    xy((kk-1)*4+(1:4)) = [x(kk-1) x(kk-1) x(kk) x(kk)]';
-    yy((kk-1)*4+(1:4)) = [0 1 1 0]'/(x(kk)-x(kk-1));
-end
-xy(end-3:end) = [x(K) x(K) fe(end) fe(end)]';
-yy(end-3:end) = [0 1 1 0]'/(fe(end)*2-2*x(K));
-%plot(xy,yy,'LineWidth',2)
-%fill(xy,yy,'y')
-
-x = [0;x];
-xk = x*setupStruct.hsupport1/setupStruct.samplingfreq*2;
-%setupStruct.erbBoundaries = xk;
-numInBand = zeros(length(xk),1);
-xh = (0:setupStruct.hsupport1-1);
-
-for kk=1:length(xk)
-    if (kk==length(xk))
-        numInBand(kk) = length(find(xh>=xk(kk)));
-    else
-        numInBand(kk) = length(intersect(find(xh>=xk(kk)),find(xh<xk(kk+1))));
-    end
-end
-setupStruct.numInBand = numInBand;
-
-setupStructNew = setupStruct;
-
-delayStructNew = struct(...
-    'sxAll2',zeros(setupStructNew.hsupport1,setupStructNew.nb),...
-    'syAll2',zeros(setupStructNew.hsupport1,setupStructNew.nb),...
-    'z200',zeros(5,setupStructNew.hsupport1),...
-    'z500',zeros(5,delayStruct.maxDelayb+1),...
-    'bxspectrum',uint32(zeros(setupStructNew.nb,1)),...
-    'byspectrum',uint32(zeros(setupStructNew.nb,1)),...
-    'bandfirst',delayStruct.bandfirst,'bandlast',delayStruct.bandlast,...
-    'bxhist',uint32(zeros(delayStruct.maxDelayb+1,1)),...
-    'bcount',zeros(1+delayStruct.maxDelayb,setupStructNew.nb),...
-    'fout',zeros(1+delayStruct.maxDelayb,setupStructNew.nb),...
-    'new',zeros(1+delayStruct.maxDelayb,setupStructNew.nb),...
-    'smlength',delayStruct.smlength,...
-    'maxDelay', delayStruct.maxDelay,...
-    'maxDelayb', delayStruct.maxDelayb,...
-    'oneGoodEstimate', 0,...
-    'delayAdjust', 0,...
-    'delayNew',zeros(setupStructNew.nb,1),...
-    'delay',zeros(setupStructNew.nb,1));
diff --git a/src/modules/audio_processing/aecm/main/matlab/waitbar_j.m b/src/modules/audio_processing/aecm/main/matlab/waitbar_j.m
deleted file mode 100644
index 50b9ccf..0000000
--- a/src/modules/audio_processing/aecm/main/matlab/waitbar_j.m
+++ /dev/null
@@ -1,234 +0,0 @@
-function fout = waitbar_j(x,whichbar, varargin)
-%WAITBAR Display wait bar.
-%   H = WAITBAR(X,'title', property, value, property, value, ...) 
-%   creates and displays a waitbar of fractional length X.  The 
-%   handle to the waitbar figure is returned in H.
-%   X should be between 0 and 1.  Optional arguments property and 
-%   value allow to set corresponding waitbar figure properties.
-%   Property can also be an action keyword 'CreateCancelBtn', in 
-%   which case a cancel button will be added to the figure, and 
-%   the passed value string will be executed upon clicking on the 
-%   cancel button or the close figure button.
-%
-%   WAITBAR(X) will set the length of the bar in the most recently
-%   created waitbar window to the fractional length X.
-%
-%   WAITBAR(X,H) will set the length of the bar in waitbar H
-%   to the fractional length X.
-%
-%   WAITBAR(X,H,'updated title') will update the title text in
-%   the waitbar figure, in addition to setting the fractional
-%   length to X.
-%
-%   WAITBAR is typically used inside a FOR loop that performs a 
-%   lengthy computation.  A sample usage is shown below:
-%
-%       h = waitbar(0,'Please wait...');
-%       for i=1:100,
-%           % computation here %
-%           waitbar(i/100,h)
-%       end
-%       close(h)
-
-%   Clay M. Thompson 11-9-92
-%   Vlad Kolesnikov  06-7-99
-%   Copyright 1984-2001 The MathWorks, Inc.
-%   $Revision: 1.22 $  $Date: 2001/04/15 12:03:29 $
-
-if nargin>=2
-    if ischar(whichbar)
-        type=2; %we are initializing
-        name=whichbar;
-    elseif isnumeric(whichbar)
-        type=1; %we are updating, given a handle
-        f=whichbar;
-    else
-        error(['Input arguments of type ' class(whichbar) ' not valid.'])
-    end
-elseif nargin==1
-    f = findobj(allchild(0),'flat','Tag','TMWWaitbar');
-    
-    if isempty(f)
-        type=2;
-        name='Waitbar';
-    else
-        type=1;
-        f=f(1);
-    end   
-else
-    error('Input arguments not valid.');
-end
-
-x = max(0,min(100*x,100));
-
-switch type
- case 1,  % waitbar(x)    update
-  p = findobj(f,'Type','patch');
-  l = findobj(f,'Type','line');
-  if isempty(f) | isempty(p) | isempty(l), 
-      error('Couldn''t find waitbar handles.'); 
-  end
-  xpatch = get(p,'XData');
-  xpatch = [0 x x 0];
-  set(p,'XData',xpatch)
-  xline = get(l,'XData');
-  set(l,'XData',xline);
-  
-  if nargin>2,
-      % Update waitbar title:
-      hAxes = findobj(f,'type','axes');
-      hTitle = get(hAxes,'title');
-      set(hTitle,'string',varargin{1});
-  end
-  
- case 2,  % waitbar(x,name)  initialize
-  vertMargin = 0;
-  if nargin > 2,
-      % we have optional arguments: property-value pairs
-      if rem (nargin, 2 ) ~= 0
-          error( 'Optional initialization arguments must be passed in pairs' );
-      end
-  end
-  
-  oldRootUnits = get(0,'Units');
-
-  set(0, 'Units', 'points');
-  screenSize = get(0,'ScreenSize');
-  
-  axFontSize=get(0,'FactoryAxesFontSize');
-  
-  pointsPerPixel = 72/get(0,'ScreenPixelsPerInch');
-  
-  width = 360 * pointsPerPixel;
-  height = 75 * pointsPerPixel;
-  pos = [screenSize(3)/2-width/2 screenSize(4)/2-height/2 width height];
-
-%pos=  [501.75 589.5 393.75 52.5];
-  f = figure(...
-      'Units', 'points', ...
-      'BusyAction', 'queue', ...
-      'Position', pos, ...
-      'Resize','on', ...
-      'CreateFcn','', ...
-      'NumberTitle','off', ...
-      'IntegerHandle','off', ...
-      'MenuBar', 'none', ...
-      'Tag','TMWWaitbar',...
-      'Interruptible', 'off', ...
-      'Visible','on');
-  
-  %%%%%%%%%%%%%%%%%%%%%
-  % set figure properties as passed to the fcn
-  % pay special attention to the 'cancel' request
-  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-  if nargin > 2,
-      propList = varargin(1:2:end);
-      valueList = varargin(2:2:end);
-      cancelBtnCreated = 0;
-      for ii = 1:length( propList )
-          try
-              if strcmp(lower(propList{ii}), 'createcancelbtn' ) & ~cancelBtnCreated
-                  cancelBtnHeight = 23 * pointsPerPixel;
-                  cancelBtnWidth = 60 * pointsPerPixel;
-                  newPos = pos;
-                  vertMargin = vertMargin + cancelBtnHeight;
-                  newPos(4) = newPos(4)+vertMargin;
-                  callbackFcn = [valueList{ii}];
-                  set( f, 'Position', newPos, 'CloseRequestFcn', callbackFcn );
-                  cancelButt = uicontrol('Parent',f, ...
-                                         'Units','points', ...
-                                         'Callback',callbackFcn, ...
-                                         'ButtonDownFcn', callbackFcn, ...
-                                         'Enable','on', ...
-                                         'Interruptible','off', ...
-                                         'Position', [pos(3)-cancelBtnWidth*1.4, 7,  ...
-                    cancelBtnWidth, cancelBtnHeight], ...
-                                         'String','Cancel', ...
-                                         'Tag','TMWWaitbarCancelButton');
-                  cancelBtnCreated = 1;
-              else
-                  % simply set the prop/value pair of the figure
-                  set( f, propList{ii}, valueList{ii});
-              end
-          catch
-              disp ( ['Warning: could not set property ''' propList{ii} ''' with value ''' num2str(valueList{ii}) '''' ] );
-          end
-      end
-  end  
-  
-  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%  
-  
-  
-  colormap([]);
-  
-  axNorm=[.05 .3 .9 .2];
- % axNorm=[1 1 1 1];
-  axPos=axNorm.*[pos(3:4),pos(3:4)] + [0 vertMargin 0 0];
-  
-  h = axes('XLim',[0 100],...
-           'YLim',[0 1],...
-           'Box','on', ...
-           'Units','Points',...
-           'FontSize', axFontSize,...
-           'Position',axPos,...
-           'XTickMode','manual',...
-           'YTickMode','manual',...
-           'XTick',[],...
-           'YTick',[],...
-           'XTickLabelMode','manual',...
-           'XTickLabel',[],...
-           'YTickLabelMode','manual',...
-           'YTickLabel',[]);
-  
-  tHandle=title(name);
-  tHandle=get(h,'title');
-  oldTitleUnits=get(tHandle,'Units');
-  set(tHandle,...
-      'Units',      'points',...
-      'String',     name);
-  
-  tExtent=get(tHandle,'Extent');
-  set(tHandle,'Units',oldTitleUnits);
-  
-  titleHeight=tExtent(4)+axPos(2)+axPos(4)+5;
-  if titleHeight>pos(4)
-      pos(4)=titleHeight;
-      pos(2)=screenSize(4)/2-pos(4)/2;
-      figPosDirty=logical(1);
-  else
-      figPosDirty=logical(0);
-  end
-  
-  if tExtent(3)>pos(3)*1.10;
-      pos(3)=min(tExtent(3)*1.10,screenSize(3));
-      pos(1)=screenSize(3)/2-pos(3)/2;
-      
-      axPos([1,3])=axNorm([1,3])*pos(3);
-      set(h,'Position',axPos);
-      
-      figPosDirty=logical(1);
-  end
-  
-  if figPosDirty
-      set(f,'Position',pos);
-  end
-
-  xpatch = [0 x x 0];
-  ypatch = [0 0 1 1];
-   xline = [100 0 0 100 100];
-   yline = [0 0 1 1 0];
-  
-  p = patch(xpatch,ypatch,'r','EdgeColor','r','EraseMode','none');
-  l = line(xline,yline,'EraseMode','none');
-  set(l,'Color',get(gca,'XColor'));
-  
-  
-  set(f,'HandleVisibility','callback','visible','on', 'resize','off');
-  
-  set(0, 'Units', oldRootUnits);
-end  % case
-drawnow;
-
-if nargout==1,
-    fout = f;
-end
diff --git a/src/modules/audio_processing/aecm/main/source/Android.mk b/src/modules/audio_processing/aecm/main/source/Android.mk
deleted file mode 100644
index 7ed9f36..0000000
--- a/src/modules/audio_processing/aecm/main/source/Android.mk
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_aecm
-LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := echo_control_mobile.c \
-    aecm_core.c 
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR'
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../utility \
-    $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/aecm/main/source/aecm_core.c b/src/modules/audio_processing/aecm/main/source/aecm_core.c
deleted file mode 100644
index f17f1bf..0000000
--- a/src/modules/audio_processing/aecm/main/source/aecm_core.c
+++ /dev/null
@@ -1,2534 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdlib.h>
-
-#include "aecm_core.h"
-#include "ring_buffer.h"
-#include "echo_control_mobile.h"
-#include "typedefs.h"
-
-// TODO(bjornv): Will be removed in final version.
-//#include <stdio.h>
-
-#ifdef ARM_WINM_LOG
-#include <stdio.h>
-#include <windows.h>
-#endif
-
-// BANDLAST - BANDFIRST must be < 32
-#define BANDFIRST                   12   // Only bit BANDFIRST through bit BANDLAST are processed
-#define BANDLAST                    43
-
-#ifdef ARM_WINM
-#define WebRtcSpl_AddSatW32(a,b)  _AddSatInt(a,b)
-#define WebRtcSpl_SubSatW32(a,b)  _SubSatInt(a,b)
-#endif
-// 16 instructions on most risc machines for 32-bit bitcount !
-
-#ifdef AEC_DEBUG
-FILE *dfile;
-FILE *testfile;
-#endif
-
-#ifdef AECM_SHORT
-
-// Square root of Hanning window in Q14
-static const WebRtc_Word16 kSqrtHanning[] =
-{
-    0, 804, 1606, 2404, 3196, 3981, 4756, 5520,
-    6270, 7005, 7723, 8423, 9102, 9760, 10394, 11003,
-    11585, 12140, 12665, 13160, 13623, 14053, 14449, 14811,
-    15137, 15426, 15679, 15893, 16069, 16207, 16305, 16364,
-    16384
-};
-
-#else
-
-// Square root of Hanning window in Q14
-static const WebRtc_Word16 kSqrtHanning[] = {0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172,
-        3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224, 6591, 6954, 7313, 7668, 8019, 8364,
-        8705, 9040, 9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514, 11795, 12068, 12335,
-        12594, 12845, 13089, 13325, 13553, 13773, 13985, 14189, 14384, 14571, 14749, 14918,
-        15079, 15231, 15373, 15506, 15631, 15746, 15851, 15947, 16034, 16111, 16179, 16237,
-        16286, 16325, 16354, 16373, 16384};
-
-#endif
-
-//Q15 alpha = 0.99439986968132  const Factor for magnitude approximation
-static const WebRtc_UWord16 kAlpha1 = 32584;
-//Q15 beta = 0.12967166976970   const Factor for magnitude approximation
-static const WebRtc_UWord16 kBeta1 = 4249;
-//Q15 alpha = 0.94234827210087  const Factor for magnitude approximation
-static const WebRtc_UWord16 kAlpha2 = 30879;
-//Q15 beta = 0.33787806009150   const Factor for magnitude approximation
-static const WebRtc_UWord16 kBeta2 = 11072;
-//Q15 alpha = 0.82247698684306  const Factor for magnitude approximation
-static const WebRtc_UWord16 kAlpha3 = 26951;
-//Q15 beta = 0.57762063060713   const Factor for magnitude approximation
-static const WebRtc_UWord16 kBeta3 = 18927;
-
-// Initialization table for echo channel in 8 kHz
-static const WebRtc_Word16 kChannelStored8kHz[PART_LEN1] = {
-    2040,   1815,   1590,   1498,   1405,   1395,   1385,   1418,
-    1451,   1506,   1562,   1644,   1726,   1804,   1882,   1918,
-    1953,   1982,   2010,   2025,   2040,   2034,   2027,   2021,
-    2014,   1997,   1980,   1925,   1869,   1800,   1732,   1683,
-    1635,   1604,   1572,   1545,   1517,   1481,   1444,   1405,
-    1367,   1331,   1294,   1270,   1245,   1239,   1233,   1247,
-    1260,   1282,   1303,   1338,   1373,   1407,   1441,   1470,
-    1499,   1524,   1549,   1565,   1582,   1601,   1621,   1649,
-    1676
-};
-
-// Initialization table for echo channel in 16 kHz
-static const WebRtc_Word16 kChannelStored16kHz[PART_LEN1] = {
-    2040,   1590,   1405,   1385,   1451,   1562,   1726,   1882,
-    1953,   2010,   2040,   2027,   2014,   1980,   1869,   1732,
-    1635,   1572,   1517,   1444,   1367,   1294,   1245,   1233,
-    1260,   1303,   1373,   1441,   1499,   1549,   1582,   1621,
-    1676,   1741,   1802,   1861,   1921,   1983,   2040,   2102,
-    2170,   2265,   2375,   2515,   2651,   2781,   2922,   3075,
-    3253,   3471,   3738,   3976,   4151,   4258,   4308,   4288,
-    4270,   4253,   4237,   4179,   4086,   3947,   3757,   3484,
-    3153
-};
-
-#ifdef ARM_WINM_LOG
-HANDLE logFile = NULL;
-#endif
-
-static void WebRtcAecm_ComfortNoise(AecmCore_t* const aecm, const WebRtc_UWord16 * const dfa,
-                                    WebRtc_Word16 * const outReal,
-                                    WebRtc_Word16 * const outImag,
-                                    const WebRtc_Word16 * const lambda);
-
-static __inline WebRtc_UWord32 WebRtcAecm_SetBit(WebRtc_UWord32 in, WebRtc_Word32 pos)
-{
-    WebRtc_UWord32 mask, out;
-
-    mask = WEBRTC_SPL_SHIFT_W32(1, pos);
-    out = (in | mask);
-
-    return out;
-}
-
-// WebRtcAecm_Hisser(...)
-//
-// This function compares the binary vector specvec with all rows of the binary matrix specmat
-// and counts per row the number of times they have the same value.
-// Input:
-//       - specvec   : binary "vector"  that is stored in a long
-//       - specmat   : binary "matrix"  that is stored as a vector of long
-// Output:
-//       - bcount    : "Vector" stored as a long, containing for each row the number of times
-//                      the matrix row and the input vector have the same value
-//
-//
-void WebRtcAecm_Hisser(const WebRtc_UWord32 specvec, const WebRtc_UWord32 * const specmat,
-                       WebRtc_UWord32 * const bcount)
-{
-    int n;
-    WebRtc_UWord32 a, b;
-    register WebRtc_UWord32 tmp;
-
-    a = specvec;
-    // compare binary vector specvec with all rows of the binary matrix specmat
-    for (n = 0; n < MAX_DELAY; n++)
-    {
-        b = specmat[n];
-        a = (specvec ^ b);
-        // Returns bit counts in tmp
-        tmp = a - ((a >> 1) & 033333333333) - ((a >> 2) & 011111111111);
-        tmp = ((tmp + (tmp >> 3)) & 030707070707);
-        tmp = (tmp + (tmp >> 6));
-        tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077;
-
-        bcount[n] = tmp;
-    }
-}
-
-// WebRtcAecm_BSpectrum(...)
-//
-// Computes the binary spectrum by comparing the input spectrum with a threshold spectrum.
-//
-// Input:
-//       - spectrum  : Spectrum of which the binary spectrum should be calculated.
-//       - thresvec  : Threshold spectrum with which the input spectrum is compared.
-// Return:
-//       - out       : Binary spectrum
-//
-WebRtc_UWord32 WebRtcAecm_BSpectrum(const WebRtc_UWord16 * const spectrum,
-                                    const WebRtc_UWord16 * const thresvec)
-{
-    int k;
-    WebRtc_UWord32 out;
-
-    out = 0;
-    for (k = BANDFIRST; k <= BANDLAST; k++)
-    {
-        if (spectrum[k] > thresvec[k])
-        {
-            out = WebRtcAecm_SetBit(out, k - BANDFIRST);
-        }
-    }
-
-    return out;
-}
-
-//   WebRtcAecm_MedianEstimator(...)
-//
-//   Calculates the median recursively.
-//
-//   Input:
-//           - newVal            :   new additional value
-//           - medianVec         :   vector with current medians
-//           - factor            :   factor for smoothing
-//
-//   Output:
-//           - medianVec         :   vector with updated median
-//
-int WebRtcAecm_MedianEstimator(const WebRtc_UWord16 newVal, WebRtc_UWord16 * const medianVec,
-                               const int factor)
-{
-    WebRtc_Word32 median;
-    WebRtc_Word32 diff;
-
-    median = (WebRtc_Word32)medianVec[0];
-
-    //median = median + ((newVal-median)>>factor);
-    diff = (WebRtc_Word32)newVal - median;
-    diff = WEBRTC_SPL_SHIFT_W32(diff, -factor);
-    median = median + diff;
-
-    medianVec[0] = (WebRtc_UWord16)median;
-
-    return 0;
-}
-
-int WebRtcAecm_CreateCore(AecmCore_t **aecmInst)
-{
-    AecmCore_t *aecm = malloc(sizeof(AecmCore_t));
-    *aecmInst = aecm;
-    if (aecm == NULL)
-    {
-        return -1;
-    }
-
-    if (WebRtcApm_CreateBuffer(&aecm->farFrameBuf, FRAME_LEN + PART_LEN) == -1)
-    {
-        WebRtcAecm_FreeCore(aecm);
-        aecm = NULL;
-        return -1;
-    }
-
-    if (WebRtcApm_CreateBuffer(&aecm->nearNoisyFrameBuf, FRAME_LEN + PART_LEN) == -1)
-    {
-        WebRtcAecm_FreeCore(aecm);
-        aecm = NULL;
-        return -1;
-    }
-
-    if (WebRtcApm_CreateBuffer(&aecm->nearCleanFrameBuf, FRAME_LEN + PART_LEN) == -1)
-    {
-        WebRtcAecm_FreeCore(aecm);
-        aecm = NULL;
-        return -1;
-    }
-
-    if (WebRtcApm_CreateBuffer(&aecm->outFrameBuf, FRAME_LEN + PART_LEN) == -1)
-    {
-        WebRtcAecm_FreeCore(aecm);
-        aecm = NULL;
-        return -1;
-    }
-
-    return 0;
-}
-
-// WebRtcAecm_InitCore(...)
-//
-// This function initializes the AECM instant created with WebRtcAecm_CreateCore(...)
-// Input:
-//      - aecm            : Pointer to the Echo Suppression instance
-//      - samplingFreq   : Sampling Frequency
-//
-// Output:
-//      - aecm            : Initialized instance
-//
-// Return value         :  0 - Ok
-//                        -1 - Error
-//
-int WebRtcAecm_InitCore(AecmCore_t * const aecm, int samplingFreq)
-{
-    int retVal = 0;
-    WebRtc_Word16 i;
-    WebRtc_Word16 tmp16;
-
-    if (samplingFreq != 8000 && samplingFreq != 16000)
-    {
-        samplingFreq = 8000;
-        retVal = -1;
-    }
-    // sanity check of sampling frequency
-    aecm->mult = (WebRtc_Word16)samplingFreq / 8000;
-
-    aecm->farBufWritePos = 0;
-    aecm->farBufReadPos = 0;
-    aecm->knownDelay = 0;
-    aecm->lastKnownDelay = 0;
-
-    WebRtcApm_InitBuffer(aecm->farFrameBuf);
-    WebRtcApm_InitBuffer(aecm->nearNoisyFrameBuf);
-    WebRtcApm_InitBuffer(aecm->nearCleanFrameBuf);
-    WebRtcApm_InitBuffer(aecm->outFrameBuf);
-
-    memset(aecm->xBuf, 0, sizeof(aecm->xBuf));
-    memset(aecm->dBufClean, 0, sizeof(aecm->dBufClean));
-    memset(aecm->dBufNoisy, 0, sizeof(aecm->dBufNoisy));
-    memset(aecm->outBuf, 0, sizeof(WebRtc_Word16) * PART_LEN);
-
-    aecm->seed = 666;
-    aecm->totCount = 0;
-
-    memset(aecm->xfaHistory, 0, sizeof(WebRtc_UWord16) * (PART_LEN1) * MAX_DELAY);
-
-    aecm->delHistoryPos = MAX_DELAY;
-
-    memset(aecm->medianYlogspec, 0, sizeof(WebRtc_UWord16) * PART_LEN1);
-    memset(aecm->medianXlogspec, 0, sizeof(WebRtc_UWord16) * PART_LEN1);
-    memset(aecm->medianBCount, 0, sizeof(WebRtc_UWord16) * MAX_DELAY);
-    memset(aecm->bxHistory, 0, sizeof(aecm->bxHistory));
-
-    // Initialize to reasonable values
-    aecm->currentDelay = 8;
-    aecm->previousDelay = 8;
-    aecm->delayAdjust = 0;
-
-    aecm->nlpFlag = 1;
-    aecm->fixedDelay = -1;
-
-    memset(aecm->xfaQDomainBuf, 0, sizeof(WebRtc_Word16) * MAX_DELAY);
-    aecm->dfaCleanQDomain = 0;
-    aecm->dfaCleanQDomainOld = 0;
-    aecm->dfaNoisyQDomain = 0;
-    aecm->dfaNoisyQDomainOld = 0;
-
-    memset(aecm->nearLogEnergy, 0, sizeof(WebRtc_Word16) * MAX_BUF_LEN);
-    memset(aecm->farLogEnergy, 0, sizeof(WebRtc_Word16) * MAX_BUF_LEN);
-    memset(aecm->echoAdaptLogEnergy, 0, sizeof(WebRtc_Word16) * MAX_BUF_LEN);
-    memset(aecm->echoStoredLogEnergy, 0, sizeof(WebRtc_Word16) * MAX_BUF_LEN);
-
-    // Initialize the echo channels with a stored shape.
-    if (samplingFreq == 8000)
-    {
-        memcpy(aecm->channelAdapt16, kChannelStored8kHz, sizeof(WebRtc_Word16) * PART_LEN1);
-    }
-    else
-    {
-        memcpy(aecm->channelAdapt16, kChannelStored16kHz, sizeof(WebRtc_Word16) * PART_LEN1);
-    }
-    memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(WebRtc_Word16) * PART_LEN1);
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
-            (WebRtc_Word32)(aecm->channelAdapt16[i]), 16);
-    }
-
-    memset(aecm->echoFilt, 0, sizeof(WebRtc_Word32) * PART_LEN1);
-    memset(aecm->nearFilt, 0, sizeof(WebRtc_Word16) * PART_LEN1);
-    aecm->noiseEstCtr = 0;
-
-    aecm->cngMode = AecmTrue;
-
-    // Increase the noise Q domain with increasing frequency, to correspond to the
-    // expected energy levels.
-    // Also shape the initial noise level with this consideration.
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        if (i < PART_LEN1 >> 2)
-        {
-            aecm->noiseEstQDomain[i] = 10;
-            tmp16 = PART_LEN1 - i;
-            aecm->noiseEst[i] = (tmp16 * tmp16) << 4;
-        } else if (i < PART_LEN1 >> 1)
-        {
-            aecm->noiseEstQDomain[i] = 11;
-            tmp16 = PART_LEN1 - i;
-            aecm->noiseEst[i] = ((tmp16 * tmp16) << 4) << 1;
-        } else
-        {
-            aecm->noiseEstQDomain[i] = 12;
-            aecm->noiseEst[i] = aecm->noiseEst[(PART_LEN1 >> 1) - 1] << 1;
-        }
-    }
-#else
-    for (i = 0; i < PART_LEN1 >> 2; i++)
-    {
-        aecm->noiseEstQDomain[i] = 10;
-        tmp16 = PART_LEN1 - i;
-        aecm->noiseEst[i] = (tmp16 * tmp16) << 4;
-    }
-    for (; i < PART_LEN1 >> 1; i++)
-    {
-        aecm->noiseEstQDomain[i] = 11;
-        tmp16 = PART_LEN1 - i;
-        aecm->noiseEst[i] = ((tmp16 * tmp16) << 4) << 1;
-    }
-    for (; i < PART_LEN1; i++)
-    {
-        aecm->noiseEstQDomain[i] = 12;
-        aecm->noiseEst[i] = aecm->noiseEst[(PART_LEN1 >> 1) - 1] << 1;
-    }
-#endif
-
-    aecm->mseAdaptOld = 1000;
-    aecm->mseStoredOld = 1000;
-    aecm->mseThreshold = WEBRTC_SPL_WORD32_MAX;
-
-    aecm->farEnergyMin = WEBRTC_SPL_WORD16_MAX;
-    aecm->farEnergyMax = WEBRTC_SPL_WORD16_MIN;
-    aecm->farEnergyMaxMin = 0;
-    aecm->farEnergyVAD = FAR_ENERGY_MIN; // This prevents false speech detection at the
-                                         // beginning.
-    aecm->farEnergyMSE = 0;
-    aecm->currentVADValue = 0;
-    aecm->vadUpdateCount = 0;
-    aecm->firstVAD = 1;
-
-    aecm->delayCount = 0;
-    aecm->newDelayCorrData = 0;
-    aecm->lastDelayUpdateCount = 0;
-    memset(aecm->delayCorrelation, 0, sizeof(WebRtc_Word16) * ((CORR_MAX << 1) + 1));
-
-    aecm->startupState = 0;
-    aecm->mseChannelCount = 0;
-    aecm->supGain = SUPGAIN_DEFAULT;
-    aecm->supGainOld = SUPGAIN_DEFAULT;
-    aecm->delayOffsetFlag = 0;
-
-    memset(aecm->delayHistogram, 0, sizeof(aecm->delayHistogram));
-    aecm->delayVadCount = 0;
-    aecm->maxDelayHistIdx = 0;
-    aecm->lastMinPos = 0;
-
-    aecm->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
-    aecm->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
-    aecm->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
-    aecm->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
-
-    return 0;
-}
-
-int WebRtcAecm_Control(AecmCore_t *aecm, int delay, int nlpFlag, int delayOffsetFlag)
-{
-    aecm->nlpFlag = nlpFlag;
-    aecm->fixedDelay = delay;
-    aecm->delayOffsetFlag = delayOffsetFlag;
-
-    return 0;
-}
-
-// WebRtcAecm_GetNewDelPos(...)
-//
-// Moves the pointer to the next entry. Returns to zero if max position reached.
-//
-// Input:
-//       - aecm     : Pointer to the AECM instance
-// Return:
-//       - pos      : New position in the history.
-//
-//
-WebRtc_Word16 WebRtcAecm_GetNewDelPos(AecmCore_t * const aecm)
-{
-    WebRtc_Word16 pos;
-
-    pos = aecm->delHistoryPos;
-    pos++;
-    if (pos >= MAX_DELAY)
-    {
-        pos = 0;
-    }
-    aecm->delHistoryPos = pos;
-
-    return pos;
-}
-
-// WebRtcAecm_EstimateDelay(...)
-//
-// Estimate the delay of the echo signal.
-//
-// Inputs:
-//      - aecm          : Pointer to the AECM instance
-//      - farSpec       : Delayed farend magnitude spectrum
-//      - nearSpec      : Nearend magnitude spectrum
-//      - stages        : Q-domain of xxFIX and yyFIX (without dynamic Q-domain)
-//      - xfaQ          : normalization factor, i.e., Q-domain before FFT
-// Return:
-//      - delay         : Estimated delay
-//
-WebRtc_Word16 WebRtcAecm_EstimateDelay(AecmCore_t * const aecm,
-                                       const WebRtc_UWord16 * const farSpec,
-                                       const WebRtc_UWord16 * const nearSpec,
-                                       const WebRtc_Word16 xfaQ)
-{
-    WebRtc_UWord32 bxspectrum, byspectrum;
-    WebRtc_UWord32 bcount[MAX_DELAY];
-
-    int i, res;
-
-    WebRtc_UWord16 xmean[PART_LEN1], ymean[PART_LEN1];
-    WebRtc_UWord16 dtmp1;
-    WebRtc_Word16 fcount[MAX_DELAY];
-
-    //WebRtc_Word16 res;
-    WebRtc_Word16 histpos;
-    WebRtc_Word16 maxHistLvl;
-    WebRtc_UWord16 *state;
-    WebRtc_Word16 minpos = -1;
-
-    enum
-    {
-        kVadCountThreshold = 25
-    };
-    enum
-    {
-        kMaxHistogram = 600
-    };
-
-    histpos = WebRtcAecm_GetNewDelPos(aecm);
-
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        aecm->xfaHistory[i][histpos] = farSpec[i];
-
-        state = &(aecm->medianXlogspec[i]);
-        res = WebRtcAecm_MedianEstimator(farSpec[i], state, 6);
-
-        state = &(aecm->medianYlogspec[i]);
-        res = WebRtcAecm_MedianEstimator(nearSpec[i], state, 6);
-
-        //  Mean:
-        //  FLOAT:
-        //  ymean = dtmp2/MAX_DELAY
-        //
-        //  FIX:
-        //  input: dtmp2FIX in Q0
-        //  output: ymeanFIX in Q8
-        //  20 = 1/MAX_DELAY in Q13 = 1/MAX_DELAY * 2^13
-        xmean[i] = (aecm->medianXlogspec[i]);
-        ymean[i] = (aecm->medianYlogspec[i]);
-
-    }
-    // Update Q-domain buffer
-    aecm->xfaQDomainBuf[histpos] = xfaQ;
-
-    // Get binary spectra
-    //  FLOAT:
-    //  bxspectrum = bspectrum(xlogspec, xmean);
-    //
-    //  FIX:
-    //  input:  xlogspecFIX,ylogspecFIX in Q8
-    //          xmeanFIX, ymeanFIX in Q8
-    //  output: unsigned long bxspectrum, byspectrum in Q0
-    bxspectrum = WebRtcAecm_BSpectrum(farSpec, xmean);
-    byspectrum = WebRtcAecm_BSpectrum(nearSpec, ymean);
-
-    // Shift binary spectrum history
-    memmove(&(aecm->bxHistory[1]), &(aecm->bxHistory[0]),
-            (MAX_DELAY - 1) * sizeof(WebRtc_UWord32));
-
-    aecm->bxHistory[0] = bxspectrum;
-
-    // Compare with delayed spectra
-    WebRtcAecm_Hisser(byspectrum, aecm->bxHistory, bcount);
-
-    for (i = 0; i < MAX_DELAY; i++)
-    {
-        // Update sum
-        // bcount is constrained to [0, 32], meaning we can smooth with a factor up to 2^11.
-        dtmp1 = (WebRtc_UWord16)bcount[i];
-        dtmp1 = WEBRTC_SPL_LSHIFT_W16(dtmp1, 9);
-        state = &(aecm->medianBCount[i]);
-        res = WebRtcAecm_MedianEstimator(dtmp1, state, 9);
-        fcount[i] = (aecm->medianBCount[i]);
-    }
-
-    // Find minimum
-    minpos = WebRtcSpl_MinIndexW16(fcount, MAX_DELAY);
-
-    // If the farend has been active sufficiently long, begin accumulating a histogram
-    // of the minimum positions. Search for the maximum bin to determine the delay.
-    if (aecm->currentVADValue == 1)
-    {
-        if (aecm->delayVadCount >= kVadCountThreshold)
-        {
-            // Increment the histogram at the current minimum position.
-            if (aecm->delayHistogram[minpos] < kMaxHistogram)
-            {
-                aecm->delayHistogram[minpos] += 3;
-            }
-
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-            // Decrement the entire histogram.
-            for (i = 0; i < MAX_DELAY; i++)
-            {
-                if (aecm->delayHistogram[i] > 0)
-                {
-                    aecm->delayHistogram[i]--;
-                }
-            }
-
-            // Select the histogram index corresponding to the maximum bin as the delay.
-            maxHistLvl = 0;
-            aecm->maxDelayHistIdx = 0;
-            for (i = 0; i < MAX_DELAY; i++)
-            {
-                if (aecm->delayHistogram[i] > maxHistLvl)
-                {
-                    maxHistLvl = aecm->delayHistogram[i];
-                    aecm->maxDelayHistIdx = i;
-                }
-            }
-#else
-            maxHistLvl = 0;
-            aecm->maxDelayHistIdx = 0;
-
-            for (i = 0; i < MAX_DELAY; i++)
-            {
-                WebRtc_Word16 tempVar = aecm->delayHistogram[i];
-
-                // Decrement the entire histogram.
-                if (tempVar > 0)
-                {
-                    tempVar--;
-                    aecm->delayHistogram[i] = tempVar;
-
-                    // Select the histogram index corresponding to the maximum bin as the delay.
-                    if (tempVar > maxHistLvl)
-                    {
-                        maxHistLvl = tempVar;
-                        aecm->maxDelayHistIdx = i;
-                    }
-                }
-            }
-#endif
-        } else
-        {
-            aecm->delayVadCount++;
-        }
-    } else
-    {
-        aecm->delayVadCount = 0;
-    }
-
-    return aecm->maxDelayHistIdx;
-}
-
-int WebRtcAecm_FreeCore(AecmCore_t *aecm)
-{
-    if (aecm == NULL)
-    {
-        return -1;
-    }
-
-    WebRtcApm_FreeBuffer(aecm->farFrameBuf);
-    WebRtcApm_FreeBuffer(aecm->nearNoisyFrameBuf);
-    WebRtcApm_FreeBuffer(aecm->nearCleanFrameBuf);
-    WebRtcApm_FreeBuffer(aecm->outFrameBuf);
-
-    free(aecm);
-
-    return 0;
-}
-
-void WebRtcAecm_ProcessFrame(AecmCore_t * const aecm, const WebRtc_Word16 * const farend,
-                             const WebRtc_Word16 * const nearendNoisy,
-                             const WebRtc_Word16 * const nearendClean,
-                             WebRtc_Word16 * const out)
-{
-    WebRtc_Word16 farBlock[PART_LEN];
-    WebRtc_Word16 nearNoisyBlock[PART_LEN];
-    WebRtc_Word16 nearCleanBlock[PART_LEN];
-    WebRtc_Word16 outBlock[PART_LEN];
-    WebRtc_Word16 farFrame[FRAME_LEN];
-    int size = 0;
-
-    // Buffer the current frame.
-    // Fetch an older one corresponding to the delay.
-    WebRtcAecm_BufferFarFrame(aecm, farend, FRAME_LEN);
-    WebRtcAecm_FetchFarFrame(aecm, farFrame, FRAME_LEN, aecm->knownDelay);
-
-    // Buffer the synchronized far and near frames,
-    // to pass the smaller blocks individually.
-    WebRtcApm_WriteBuffer(aecm->farFrameBuf, farFrame, FRAME_LEN);
-    WebRtcApm_WriteBuffer(aecm->nearNoisyFrameBuf, nearendNoisy, FRAME_LEN);
-    if (nearendClean != NULL)
-    {
-        WebRtcApm_WriteBuffer(aecm->nearCleanFrameBuf, nearendClean, FRAME_LEN);
-    }
-
-    // Process as many blocks as possible.
-    while (WebRtcApm_get_buffer_size(aecm->farFrameBuf) >= PART_LEN)
-    {
-        WebRtcApm_ReadBuffer(aecm->farFrameBuf, farBlock, PART_LEN);
-        WebRtcApm_ReadBuffer(aecm->nearNoisyFrameBuf, nearNoisyBlock, PART_LEN);
-        if (nearendClean != NULL)
-        {
-            WebRtcApm_ReadBuffer(aecm->nearCleanFrameBuf, nearCleanBlock, PART_LEN);
-            WebRtcAecm_ProcessBlock(aecm, farBlock, nearNoisyBlock, nearCleanBlock, outBlock);
-        } else
-        {
-            WebRtcAecm_ProcessBlock(aecm, farBlock, nearNoisyBlock, NULL, outBlock);
-        }
-
-        WebRtcApm_WriteBuffer(aecm->outFrameBuf, outBlock, PART_LEN);
-    }
-
-    // Stuff the out buffer if we have less than a frame to output.
-    // This should only happen for the first frame.
-    size = WebRtcApm_get_buffer_size(aecm->outFrameBuf);
-    if (size < FRAME_LEN)
-    {
-        WebRtcApm_StuffBuffer(aecm->outFrameBuf, FRAME_LEN - size);
-    }
-
-    // Obtain an output frame.
-    WebRtcApm_ReadBuffer(aecm->outFrameBuf, out, FRAME_LEN);
-}
-
-// WebRtcAecm_AsymFilt(...)
-//
-// Performs asymmetric filtering.
-//
-// Inputs:
-//      - filtOld       : Previous filtered value.
-//      - inVal         : New input value.
-//      - stepSizePos   : Step size when we have a positive contribution.
-//      - stepSizeNeg   : Step size when we have a negative contribution.
-//
-// Output:
-//
-// Return: - Filtered value.
-//
-WebRtc_Word16 WebRtcAecm_AsymFilt(const WebRtc_Word16 filtOld, const WebRtc_Word16 inVal,
-                                  const WebRtc_Word16 stepSizePos,
-                                  const WebRtc_Word16 stepSizeNeg)
-{
-    WebRtc_Word16 retVal;
-
-    if ((filtOld == WEBRTC_SPL_WORD16_MAX) | (filtOld == WEBRTC_SPL_WORD16_MIN))
-    {
-        return inVal;
-    }
-    retVal = filtOld;
-    if (filtOld > inVal)
-    {
-        retVal -= WEBRTC_SPL_RSHIFT_W16(filtOld - inVal, stepSizeNeg);
-    } else
-    {
-        retVal += WEBRTC_SPL_RSHIFT_W16(inVal - filtOld, stepSizePos);
-    }
-
-    return retVal;
-}
-
-// WebRtcAecm_CalcEnergies(...)
-//
-// This function calculates the log of energies for nearend, farend and estimated
-// echoes. There is also an update of energy decision levels, i.e. internl VAD.
-//
-//
-// @param  aecm         [i/o]   Handle of the AECM instance.
-// @param  delayDiff    [in]    Delay position in farend buffer.
-// @param  nearEner     [in]    Near end energy for current block (Q[aecm->dfaQDomain]).
-// @param  echoEst      [i/o]   Estimated echo
-//                              (Q[aecm->xfaQDomain[delayDiff]+RESOLUTION_CHANNEL16]).
-//
-void WebRtcAecm_CalcEnergies(AecmCore_t * const aecm, const WebRtc_Word16 delayDiff,
-                             const WebRtc_UWord32 nearEner, WebRtc_Word32 * const echoEst)
-{
-    // Local variables
-    WebRtc_UWord32 tmpAdapt, tmpStored, tmpFar;
-
-    int i;
-
-    WebRtc_Word16 zeros, frac;
-    WebRtc_Word16 tmp16;
-    WebRtc_Word16 increase_max_shifts = 4;
-    WebRtc_Word16 decrease_max_shifts = 11;
-    WebRtc_Word16 increase_min_shifts = 11;
-    WebRtc_Word16 decrease_min_shifts = 3;
-
-    // Get log of near end energy and store in buffer
-
-    // Shift buffer
-    memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy,
-            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
-
-    // Logarithm of integrated magnitude spectrum (nearEner)
-    if (nearEner)
-    {
-        zeros = WebRtcSpl_NormU32(nearEner);
-        frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(
-                              (WEBRTC_SPL_LSHIFT_U32(nearEner, zeros) & 0x7FFFFFFF),
-                              23);
-        // log2 in Q8
-        aecm->nearLogEnergy[0] = WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
-        aecm->nearLogEnergy[0] -= WEBRTC_SPL_LSHIFT_W16(aecm->dfaNoisyQDomain, 8);
-    } else
-    {
-        aecm->nearLogEnergy[0] = 0;
-    }
-    aecm->nearLogEnergy[0] += WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7);
-    // END: Get log of near end energy
-
-    // Get energy for the delayed far end signal and estimated
-    // echo using both stored and adapted channels.
-    tmpAdapt = 0;
-    tmpStored = 0;
-    tmpFar = 0;
-
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        // Get estimated echo energies for adaptive channel and stored channel
-        echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                aecm->xfaHistory[i][delayDiff]);
-        tmpFar += (WebRtc_UWord32)(aecm->xfaHistory[i][delayDiff]);
-        tmpAdapt += WEBRTC_SPL_UMUL_16_16(aecm->channelAdapt16[i],
-                aecm->xfaHistory[i][delayDiff]);
-        tmpStored += (WebRtc_UWord32)echoEst[i];
-    }
-    // Shift buffers
-    memmove(aecm->farLogEnergy + 1, aecm->farLogEnergy,
-            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
-    memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy,
-            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
-    memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy,
-            sizeof(WebRtc_Word16) * (MAX_BUF_LEN - 1));
-
-    // Logarithm of delayed far end energy
-    if (tmpFar)
-    {
-        zeros = WebRtcSpl_NormU32(tmpFar);
-        frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpFar, zeros)
-                        & 0x7FFFFFFF), 23);
-        // log2 in Q8
-        aecm->farLogEnergy[0] = WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
-        aecm->farLogEnergy[0] -= WEBRTC_SPL_LSHIFT_W16(aecm->xfaQDomainBuf[delayDiff], 8);
-    } else
-    {
-        aecm->farLogEnergy[0] = 0;
-    }
-    aecm->farLogEnergy[0] += WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7);
-
-    // Logarithm of estimated echo energy through adapted channel
-    if (tmpAdapt)
-    {
-        zeros = WebRtcSpl_NormU32(tmpAdapt);
-        frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpAdapt, zeros)
-                        & 0x7FFFFFFF), 23);
-        //log2 in Q8
-        aecm->echoAdaptLogEnergy[0] = WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
-        aecm->echoAdaptLogEnergy[0]
-                -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + aecm->xfaQDomainBuf[delayDiff], 8);
-    } else
-    {
-        aecm->echoAdaptLogEnergy[0] = 0;
-    }
-    aecm->echoAdaptLogEnergy[0] += WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7);
-
-    // Logarithm of estimated echo energy through stored channel
-    if (tmpStored)
-    {
-        zeros = WebRtcSpl_NormU32(tmpStored);
-        frac = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32((WEBRTC_SPL_LSHIFT_U32(tmpStored, zeros)
-                        & 0x7FFFFFFF), 23);
-        //log2 in Q8
-        aecm->echoStoredLogEnergy[0] = WEBRTC_SPL_LSHIFT_W16((31 - zeros), 8) + frac;
-        aecm->echoStoredLogEnergy[0]
-                -= WEBRTC_SPL_LSHIFT_W16(RESOLUTION_CHANNEL16 + aecm->xfaQDomainBuf[delayDiff], 8);
-    } else
-    {
-        aecm->echoStoredLogEnergy[0] = 0;
-    }
-    aecm->echoStoredLogEnergy[0] += WEBRTC_SPL_LSHIFT_W16(PART_LEN_SHIFT, 7);
-
-    // Update farend energy levels (min, max, vad, mse)
-    if (aecm->farLogEnergy[0] > FAR_ENERGY_MIN)
-    {
-        if (aecm->startupState == 0)
-        {
-            increase_max_shifts = 2;
-            decrease_min_shifts = 2;
-            increase_min_shifts = 8;
-        }
-
-        aecm->farEnergyMin = WebRtcAecm_AsymFilt(aecm->farEnergyMin, aecm->farLogEnergy[0],
-                                                 increase_min_shifts, decrease_min_shifts);
-        aecm->farEnergyMax = WebRtcAecm_AsymFilt(aecm->farEnergyMax, aecm->farLogEnergy[0],
-                                                 increase_max_shifts, decrease_max_shifts);
-        aecm->farEnergyMaxMin = (aecm->farEnergyMax - aecm->farEnergyMin);
-
-        // Dynamic VAD region size
-        tmp16 = 2560 - aecm->farEnergyMin;
-        if (tmp16 > 0)
-        {
-            tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, FAR_ENERGY_VAD_REGION, 9);
-        } else
-        {
-            tmp16 = 0;
-        }
-        tmp16 += FAR_ENERGY_VAD_REGION;
-
-        if ((aecm->startupState == 0) | (aecm->vadUpdateCount > 1024))
-        {
-            // In startup phase or VAD update halted
-            aecm->farEnergyVAD = aecm->farEnergyMin + tmp16;
-        } else
-        {
-            if (aecm->farEnergyVAD > aecm->farLogEnergy[0])
-            {
-                aecm->farEnergyVAD += WEBRTC_SPL_RSHIFT_W16(aecm->farLogEnergy[0] + tmp16
-                        - aecm->farEnergyVAD, 6);
-                aecm->vadUpdateCount = 0;
-            } else
-            {
-                aecm->vadUpdateCount++;
-            }
-        }
-        // Put MSE threshold higher than VAD
-        aecm->farEnergyMSE = aecm->farEnergyVAD + (1 << 8);
-    }
-
-    // Update VAD variables
-    if (aecm->farLogEnergy[0] > aecm->farEnergyVAD)
-    {
-        if ((aecm->startupState == 0) | (aecm->farEnergyMaxMin > FAR_ENERGY_DIFF))
-        {
-            // We are in startup or have significant dynamics in input speech level
-            aecm->currentVADValue = 1;
-        }
-    } else
-    {
-        aecm->currentVADValue = 0;
-    }
-    if ((aecm->currentVADValue) && (aecm->firstVAD))
-    {
-        aecm->firstVAD = 0;
-        if (aecm->echoAdaptLogEnergy[0] > aecm->nearLogEnergy[0])
-        {
-            // The estimated echo has higher energy than the near end signal. This means that
-            // the initialization was too aggressive. Scale down by a factor 8
-            for (i = 0; i < PART_LEN1; i++)
-            {
-                aecm->channelAdapt16[i] >>= 3;
-            }
-            // Compensate the adapted echo energy level accordingly.
-            aecm->echoAdaptLogEnergy[0] -= (3 << 8);
-            aecm->firstVAD = 1;
-        }
-    }
-    // END: Energies of delayed far, echo estimates
-    // TODO(bjornv): Will be removed in final version.
-#ifdef VAD_DATA
-    fwrite(&(aecm->currentVADValue), sizeof(WebRtc_Word16), 1, aecm->vad_file);
-    fwrite(&(aecm->currentDelay), sizeof(WebRtc_Word16), 1, aecm->delay_file);
-    fwrite(&(aecm->farLogEnergy[0]), sizeof(WebRtc_Word16), 1, aecm->far_cur_file);
-    fwrite(&(aecm->farEnergyMin), sizeof(WebRtc_Word16), 1, aecm->far_min_file);
-    fwrite(&(aecm->farEnergyMax), sizeof(WebRtc_Word16), 1, aecm->far_max_file);
-    fwrite(&(aecm->farEnergyVAD), sizeof(WebRtc_Word16), 1, aecm->far_vad_file);
-#endif
-}
-
-// WebRtcAecm_CalcStepSize(...)
-//
-// This function calculates the step size used in channel estimation
-//
-//
-// @param  aecm  [in]    Handle of the AECM instance.
-// @param  mu   [out]   (Return value) Stepsize in log2(), i.e. number of shifts.
-//
-//
-WebRtc_Word16 WebRtcAecm_CalcStepSize(AecmCore_t * const aecm)
-{
-
-    WebRtc_Word32 tmp32;
-    WebRtc_Word16 tmp16;
-    WebRtc_Word16 mu;
-
-    // Here we calculate the step size mu used in the
-    // following NLMS based Channel estimation algorithm
-    mu = MU_MAX;
-    if (!aecm->currentVADValue)
-    {
-        // Far end energy level too low, no channel update
-        mu = 0;
-    } else if (aecm->startupState > 0)
-    {
-        if (aecm->farEnergyMin >= aecm->farEnergyMax)
-        {
-            mu = MU_MIN;
-        } else
-        {
-            tmp16 = (aecm->farLogEnergy[0] - aecm->farEnergyMin);
-            tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, MU_DIFF);
-            tmp32 = WebRtcSpl_DivW32W16(tmp32, aecm->farEnergyMaxMin);
-            mu = MU_MIN - 1 - (WebRtc_Word16)(tmp32);
-            // The -1 is an alternative to rounding. This way we get a larger
-            // stepsize, so we in some sense compensate for truncation in NLMS
-        }
-        if (mu < MU_MAX)
-        {
-            mu = MU_MAX; // Equivalent with maximum step size of 2^-MU_MAX
-        }
-    }
-    // END: Update step size
-
-    return mu;
-}
-
-// WebRtcAecm_UpdateChannel(...)
-//
-// This function performs channel estimation. NLMS and decision on channel storage.
-//
-//
-// @param  aecm         [i/o]   Handle of the AECM instance.
-// @param  dfa          [in]    Absolute value of the nearend signal (Q[aecm->dfaQDomain])
-// @param  delayDiff    [in]    Delay position in farend buffer.
-// @param  mu           [in]    NLMS step size.
-// @param  echoEst      [i/o]   Estimated echo
-//                              (Q[aecm->xfaQDomain[delayDiff]+RESOLUTION_CHANNEL16]).
-//
-void WebRtcAecm_UpdateChannel(AecmCore_t * const aecm, const WebRtc_UWord16 * const dfa,
-                              const WebRtc_Word16 delayDiff, const WebRtc_Word16 mu,
-                              WebRtc_Word32 * const echoEst)
-{
-
-    WebRtc_UWord32 tmpU32no1, tmpU32no2;
-    WebRtc_Word32 tmp32no1, tmp32no2;
-    WebRtc_Word32 mseStored;
-    WebRtc_Word32 mseAdapt;
-
-    int i;
-
-    WebRtc_Word16 zerosFar, zerosNum, zerosCh, zerosDfa;
-    WebRtc_Word16 shiftChFar, shiftNum, shift2ResChan;
-    WebRtc_Word16 tmp16no1;
-    WebRtc_Word16 xfaQ, dfaQ;
-
-    // This is the channel estimation algorithm. It is base on NLMS but has a variable step
-    // length, which was calculated above.
-    if (mu)
-    {
-        for (i = 0; i < PART_LEN1; i++)
-        {
-            // Determine norm of channel and farend to make sure we don't get overflow in
-            // multiplication
-            zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]);
-            zerosFar = WebRtcSpl_NormU32((WebRtc_UWord32)aecm->xfaHistory[i][delayDiff]);
-            if (zerosCh + zerosFar > 31)
-            {
-                // Multiplication is safe
-                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(aecm->channelAdapt32[i],
-                        aecm->xfaHistory[i][delayDiff]);
-                shiftChFar = 0;
-            } else
-            {
-                // We need to shift down before multiplication
-                shiftChFar = 32 - zerosCh - zerosFar;
-                tmpU32no1
-                        = WEBRTC_SPL_UMUL_32_16(WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i],
-                                        shiftChFar),
-                                aecm->xfaHistory[i][delayDiff]);
-            }
-            // Determine Q-domain of numerator
-            zerosNum = WebRtcSpl_NormU32(tmpU32no1);
-            if (dfa[i])
-            {
-                zerosDfa = WebRtcSpl_NormU32((WebRtc_UWord32)dfa[i]);
-            } else
-            {
-                zerosDfa = 32;
-            }
-            tmp16no1 = zerosDfa - 2 + aecm->dfaNoisyQDomain - RESOLUTION_CHANNEL32
-                    - aecm->xfaQDomainBuf[delayDiff] + shiftChFar;
-            if (zerosNum > tmp16no1 + 1)
-            {
-                xfaQ = tmp16no1;
-                dfaQ = zerosDfa - 2;
-            } else
-            {
-                xfaQ = zerosNum - 2;
-                dfaQ = RESOLUTION_CHANNEL32 + aecm->xfaQDomainBuf[delayDiff]
-                        - aecm->dfaNoisyQDomain - shiftChFar + xfaQ;
-            }
-            // Add in the same Q-domain
-            tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ);
-            tmpU32no2 = WEBRTC_SPL_SHIFT_W32((WebRtc_UWord32)dfa[i], dfaQ);
-            tmp32no1 = (WebRtc_Word32)tmpU32no2 - (WebRtc_Word32)tmpU32no1;
-            zerosNum = WebRtcSpl_NormW32(tmp32no1);
-            if ((tmp32no1) && (aecm->xfaHistory[i][delayDiff] > (CHANNEL_VAD
-                    << aecm->xfaQDomainBuf[delayDiff])))
-            {
-                //
-                // Update is needed
-                //
-                // This is what we would like to compute
-                //
-                // tmp32no1 = dfa[i] - (aecm->channelAdapt[i] * aecm->xfaHistory[i][delayDiff])
-                // tmp32norm = (i + 1)
-                // aecm->channelAdapt[i] += (2^mu) * tmp32no1
-                //                        / (tmp32norm * aecm->xfaHistory[i][delayDiff])
-                //
-
-                // Make sure we don't get overflow in multiplication.
-                if (zerosNum + zerosFar > 31)
-                {
-                    if (tmp32no1 > 0)
-                    {
-                        tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmp32no1,
-                                aecm->xfaHistory[i][delayDiff]);
-                    } else
-                    {
-                        tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(-tmp32no1,
-                                aecm->xfaHistory[i][delayDiff]);
-                    }
-                    shiftNum = 0;
-                } else
-                {
-                    shiftNum = 32 - (zerosNum + zerosFar);
-                    if (tmp32no1 > 0)
-                    {
-                        tmp32no2 = (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(
-                                WEBRTC_SPL_RSHIFT_W32(tmp32no1, shiftNum),
-                                aecm->xfaHistory[i][delayDiff]);
-                    } else
-                    {
-                        tmp32no2 = -(WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(
-                                WEBRTC_SPL_RSHIFT_W32(-tmp32no1, shiftNum),
-                                aecm->xfaHistory[i][delayDiff]);
-                    }
-                }
-                // Normalize with respect to frequency bin
-                tmp32no2 = WebRtcSpl_DivW32W16(tmp32no2, i + 1);
-                // Make sure we are in the right Q-domain
-                shift2ResChan = shiftNum + shiftChFar - xfaQ - mu - ((30 - zerosFar) << 1);
-                if (WebRtcSpl_NormW32(tmp32no2) < shift2ResChan)
-                {
-                    tmp32no2 = WEBRTC_SPL_WORD32_MAX;
-                } else
-                {
-                    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, shift2ResChan);
-                }
-                aecm->channelAdapt32[i] = WEBRTC_SPL_ADD_SAT_W32(aecm->channelAdapt32[i],
-                        tmp32no2);
-                if (aecm->channelAdapt32[i] < 0)
-                {
-                    // We can never have negative channel gain
-                    aecm->channelAdapt32[i] = 0;
-                }
-                aecm->channelAdapt16[i]
-                        = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(aecm->channelAdapt32[i], 16);
-            }
-        }
-    }
-    // END: Adaptive channel update
-
-    // Determine if we should store or restore the channel
-    if ((aecm->startupState == 0) & (aecm->currentVADValue))
-    {
-        // During startup we store the channel every block.
-        memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(WebRtc_Word16) * PART_LEN1);
-        // TODO(bjornv): Will be removed in final version.
-#ifdef STORE_CHANNEL_DATA
-        fwrite(aecm->channelStored, sizeof(WebRtc_Word16), PART_LEN1, aecm->channel_file_init);
-#endif
-        // Recalculate echo estimate
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-        for (i = 0; i < PART_LEN1; i++)
-        {
-            echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                    aecm->xfaHistory[i][delayDiff]);
-        }
-#else
-        for (i = 0; i < PART_LEN; ) //assume PART_LEN is 4's multiples
-
-        {
-            echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                    aecm->xfaHistory[i][delayDiff]);
-            i++;
-            echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                    aecm->xfaHistory[i][delayDiff]);
-            i++;
-            echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                    aecm->xfaHistory[i][delayDiff]);
-            i++;
-            echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                    aecm->xfaHistory[i][delayDiff]);
-            i++;
-        }
-        echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
-                aecm->xfaHistory[i][delayDiff]);
-#endif
-    } else
-    {
-        if (aecm->farLogEnergy[0] < aecm->farEnergyMSE)
-        {
-            aecm->mseChannelCount = 0;
-            aecm->delayCount = 0;
-        } else
-        {
-            aecm->mseChannelCount++;
-            aecm->delayCount++;
-        }
-        // Enough data for validation. Store channel if we can.
-        if (aecm->mseChannelCount >= (MIN_MSE_COUNT + 10))
-        {
-            // We have enough data.
-            // Calculate MSE of "Adapt" and "Stored" versions.
-            // It is actually not MSE, but average absolute error.
-            mseStored = 0;
-            mseAdapt = 0;
-            for (i = 0; i < MIN_MSE_COUNT; i++)
-            {
-                tmp32no1 = ((WebRtc_Word32)aecm->echoStoredLogEnergy[i]
-                        - (WebRtc_Word32)aecm->nearLogEnergy[i]);
-                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
-                mseStored += tmp32no2;
-
-                tmp32no1 = ((WebRtc_Word32)aecm->echoAdaptLogEnergy[i]
-                        - (WebRtc_Word32)aecm->nearLogEnergy[i]);
-                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
-                mseAdapt += tmp32no2;
-            }
-            if (((mseStored << MSE_RESOLUTION) < (MIN_MSE_DIFF * mseAdapt))
-                    & ((aecm->mseStoredOld << MSE_RESOLUTION) < (MIN_MSE_DIFF
-                            * aecm->mseAdaptOld)))
-            {
-                // The stored channel has a significantly lower MSE than the adaptive one for
-                // two consecutive calculations. Reset the adaptive channel.
-                memcpy(aecm->channelAdapt16, aecm->channelStored,
-                       sizeof(WebRtc_Word16) * PART_LEN1);
-                // Restore the W32 channel
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-                for (i = 0; i < PART_LEN1; i++)
-                {
-                    aecm->channelAdapt32[i]
-                            = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-                }
-#else
-                for (i = 0; i < PART_LEN; ) //assume PART_LEN is 4's multiples
-
-                {
-                    aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-                    i++;
-                    aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-                    i++;
-                    aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-                    i++;
-                    aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-                    i++;
-                }
-                aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)aecm->channelStored[i], 16);
-#endif
-
-            } else if (((MIN_MSE_DIFF * mseStored) > (mseAdapt << MSE_RESOLUTION)) & (mseAdapt
-                    < aecm->mseThreshold) & (aecm->mseAdaptOld < aecm->mseThreshold))
-            {
-                // The adaptive channel has a significantly lower MSE than the stored one.
-                // The MSE for the adaptive channel has also been low for two consecutive
-                // calculations. Store the adaptive channel.
-                memcpy(aecm->channelStored, aecm->channelAdapt16,
-                       sizeof(WebRtc_Word16) * PART_LEN1);
-                // TODO(bjornv): Will be removed in final version.
-#ifdef STORE_CHANNEL_DATA
-                fwrite(aecm->channelStored, sizeof(WebRtc_Word16), PART_LEN1,
-                       aecm->channel_file);
-#endif
-// Recalculate echo estimate
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-                for (i = 0; i < PART_LEN1; i++)
-                {
-                    echoEst[i]
-                            = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-                }
-#else
-                for (i = 0; i < PART_LEN; ) //assume PART_LEN is 4's multiples
-
-                {
-                    echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-                    i++;
-                    echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-                    i++;
-                    echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-                    i++;
-                    echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-                    i++;
-                }
-                echoEst[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], aecm->xfaHistory[i][delayDiff]);
-#endif
-                // Update threshold
-                if (aecm->mseThreshold == WEBRTC_SPL_WORD32_MAX)
-                {
-                    aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
-                } else
-                {
-                    aecm->mseThreshold += WEBRTC_SPL_MUL_16_16_RSFT(mseAdapt
-                            - WEBRTC_SPL_MUL_16_16_RSFT(aecm->mseThreshold, 5, 3), 205, 8);
-                }
-
-            }
-
-            // Reset counter
-            aecm->mseChannelCount = 0;
-
-            // Store the MSE values.
-            aecm->mseStoredOld = mseStored;
-            aecm->mseAdaptOld = mseAdapt;
-        }
-    }
-    // END: Determine if we should store or reset channel estimate.
-}
-
-// WebRtcAecm_CalcSuppressionGain(...)
-//
-// This function calculates the suppression gain that is used in the Wiener filter.
-//
-//
-// @param  aecm     [i/n]   Handle of the AECM instance.
-// @param  supGain  [out]   (Return value) Suppression gain with which to scale the noise
-//                          level (Q14).
-//
-//
-WebRtc_Word16 WebRtcAecm_CalcSuppressionGain(AecmCore_t * const aecm)
-{
-    WebRtc_Word32 tmp32no1;
-
-    WebRtc_Word16 supGain;
-    WebRtc_Word16 tmp16no1;
-    WebRtc_Word16 dE = 0;
-
-    // Determine suppression gain used in the Wiener filter. The gain is based on a mix of far
-    // end energy and echo estimation error.
-    supGain = SUPGAIN_DEFAULT;
-    // Adjust for the far end signal level. A low signal level indicates no far end signal,
-    // hence we set the suppression gain to 0
-    if (!aecm->currentVADValue)
-    {
-        supGain = 0;
-    } else
-    {
-        // Adjust for possible double talk. If we have large variations in estimation error we
-        // likely have double talk (or poor channel).
-        tmp16no1 = (aecm->nearLogEnergy[0] - aecm->echoStoredLogEnergy[0] - ENERGY_DEV_OFFSET);
-        dE = WEBRTC_SPL_ABS_W16(tmp16no1);
-
-        if (dE < ENERGY_DEV_TOL)
-        {
-            // Likely no double talk. The better estimation, the more we can suppress signal.
-            // Update counters
-            if (dE < SUPGAIN_EPC_DT)
-            {
-                tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffAB, dE);
-                tmp32no1 += (SUPGAIN_EPC_DT >> 1);
-                tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT);
-                supGain = aecm->supGainErrParamA - tmp16no1;
-            } else
-            {
-                tmp32no1 = WEBRTC_SPL_MUL_16_16(aecm->supGainErrParamDiffBD,
-                                                (ENERGY_DEV_TOL - dE));
-                tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1);
-                tmp16no1 = (WebRtc_Word16)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL
-                        - SUPGAIN_EPC_DT));
-                supGain = aecm->supGainErrParamD + tmp16no1;
-            }
-        } else
-        {
-            // Likely in double talk. Use default value
-            supGain = aecm->supGainErrParamD;
-        }
-    }
-
-    if (supGain > aecm->supGainOld)
-    {
-        tmp16no1 = supGain;
-    } else
-    {
-        tmp16no1 = aecm->supGainOld;
-    }
-    aecm->supGainOld = supGain;
-    if (tmp16no1 < aecm->supGain)
-    {
-        aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4);
-    } else
-    {
-        aecm->supGain += (WebRtc_Word16)((tmp16no1 - aecm->supGain) >> 4);
-    }
-
-    // END: Update suppression gain
-
-    return aecm->supGain;
-}
-
-// WebRtcAecm_DelayCompensation(...)
-//
-// Secondary delay estimation that can be used as a backup or for validation. This function is
-// still under construction and not activated in current version.
-//
-//
-// @param  aecm  [i/o]   Handle of the AECM instance.
-//
-//
-void WebRtcAecm_DelayCompensation(AecmCore_t * const aecm)
-{
-    int i, j;
-    WebRtc_Word32 delayMeanEcho[CORR_BUF_LEN];
-    WebRtc_Word32 delayMeanNear[CORR_BUF_LEN];
-    WebRtc_Word16 sumBitPattern, bitPatternEcho, bitPatternNear, maxPos, maxValue,
-            maxValueLeft, maxValueRight;
-
-    // Check delay (calculate the delay offset (if we can)).
-    if ((aecm->startupState > 0) & (aecm->delayCount >= CORR_MAX_BUF) & aecm->delayOffsetFlag)
-    {
-        // Calculate mean values
-        for (i = 0; i < CORR_BUF_LEN; i++)
-        {
-            delayMeanEcho[i] = 0;
-            delayMeanNear[i] = 0;
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-            for (j = 0; j < CORR_WIDTH; j++)
-            {
-                delayMeanEcho[i] += (WebRtc_Word32)aecm->echoStoredLogEnergy[i + j];
-                delayMeanNear[i] += (WebRtc_Word32)aecm->nearLogEnergy[i + j];
-            }
-#else
-            for (j = 0; j < CORR_WIDTH -1; )
-            {
-                delayMeanEcho[i] += (WebRtc_Word32)aecm->echoStoredLogEnergy[i + j];
-                delayMeanNear[i] += (WebRtc_Word32)aecm->nearLogEnergy[i + j];
-                j++;
-                delayMeanEcho[i] += (WebRtc_Word32)aecm->echoStoredLogEnergy[i + j];
-                delayMeanNear[i] += (WebRtc_Word32)aecm->nearLogEnergy[i + j];
-                j++;
-            }
-            delayMeanEcho[i] += (WebRtc_Word32)aecm->echoStoredLogEnergy[i + j];
-            delayMeanNear[i] += (WebRtc_Word32)aecm->nearLogEnergy[i + j];
-#endif
-        }
-        // Calculate correlation values
-        for (i = 0; i < CORR_BUF_LEN; i++)
-        {
-            sumBitPattern = 0;
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-            for (j = 0; j < CORR_WIDTH; j++)
-            {
-                bitPatternEcho = (WebRtc_Word16)((WebRtc_Word32)aecm->echoStoredLogEnergy[i
-                        + j] * CORR_WIDTH > delayMeanEcho[i]);
-                bitPatternNear = (WebRtc_Word16)((WebRtc_Word32)aecm->nearLogEnergy[CORR_MAX
-                        + j] * CORR_WIDTH > delayMeanNear[CORR_MAX]);
-                sumBitPattern += !(bitPatternEcho ^ bitPatternNear);
-            }
-#else
-            for (j = 0; j < CORR_WIDTH -1; )
-            {
-                bitPatternEcho = (WebRtc_Word16)((WebRtc_Word32)aecm->echoStoredLogEnergy[i
-                    + j] * CORR_WIDTH > delayMeanEcho[i]);
-                bitPatternNear = (WebRtc_Word16)((WebRtc_Word32)aecm->nearLogEnergy[CORR_MAX
-                    + j] * CORR_WIDTH > delayMeanNear[CORR_MAX]);
-                sumBitPattern += !(bitPatternEcho ^ bitPatternNear);
-                j++;
-                bitPatternEcho = (WebRtc_Word16)((WebRtc_Word32)aecm->echoStoredLogEnergy[i
-                    + j] * CORR_WIDTH > delayMeanEcho[i]);
-                bitPatternNear = (WebRtc_Word16)((WebRtc_Word32)aecm->nearLogEnergy[CORR_MAX
-                    + j] * CORR_WIDTH > delayMeanNear[CORR_MAX]);
-                sumBitPattern += !(bitPatternEcho ^ bitPatternNear);
-                j++;
-            }
-            bitPatternEcho = (WebRtc_Word16)((WebRtc_Word32)aecm->echoStoredLogEnergy[i + j]
-                    * CORR_WIDTH > delayMeanEcho[i]);
-            bitPatternNear = (WebRtc_Word16)((WebRtc_Word32)aecm->nearLogEnergy[CORR_MAX + j]
-                    * CORR_WIDTH > delayMeanNear[CORR_MAX]);
-            sumBitPattern += !(bitPatternEcho ^ bitPatternNear);
-#endif
-            aecm->delayCorrelation[i] = sumBitPattern;
-        }
-        aecm->newDelayCorrData = 1; // Indicate we have new correlation data to evaluate
-    }
-    if ((aecm->startupState == 2) & (aecm->lastDelayUpdateCount > (CORR_WIDTH << 1))
-            & aecm->newDelayCorrData)
-    {
-        // Find maximum value and maximum position as well as values on the sides.
-        maxPos = 0;
-        maxValue = aecm->delayCorrelation[0];
-        maxValueLeft = maxValue;
-        maxValueRight = aecm->delayCorrelation[CORR_DEV];
-        for (i = 1; i < CORR_BUF_LEN; i++)
-        {
-            if (aecm->delayCorrelation[i] > maxValue)
-            {
-                maxValue = aecm->delayCorrelation[i];
-                maxPos = i;
-                if (maxPos < CORR_DEV)
-                {
-                    maxValueLeft = aecm->delayCorrelation[0];
-                    maxValueRight = aecm->delayCorrelation[i + CORR_DEV];
-                } else if (maxPos > (CORR_MAX << 1) - CORR_DEV)
-                {
-                    maxValueLeft = aecm->delayCorrelation[i - CORR_DEV];
-                    maxValueRight = aecm->delayCorrelation[(CORR_MAX << 1)];
-                } else
-                {
-                    maxValueLeft = aecm->delayCorrelation[i - CORR_DEV];
-                    maxValueRight = aecm->delayCorrelation[i + CORR_DEV];
-                }
-            }
-        }
-        if ((maxPos > 0) & (maxPos < (CORR_MAX << 1)))
-        {
-            // Avoid maximum at boundaries. The maximum peak has to be higher than
-            // CORR_MAX_LEVEL. It also has to be sharp, i.e. the value CORR_DEV bins off should
-            // be CORR_MAX_LOW lower than the maximum.
-            if ((maxValue > CORR_MAX_LEVEL) & (maxValueLeft < maxValue - CORR_MAX_LOW)
-                    & (maxValueRight < maxValue - CORR_MAX_LOW))
-            {
-                aecm->delayAdjust += CORR_MAX - maxPos;
-                aecm->newDelayCorrData = 0;
-                aecm->lastDelayUpdateCount = 0;
-            }
-        }
-    }
-    // END: "Check delay"
-}
-
-void WebRtcAecm_ProcessBlock(AecmCore_t * const aecm, const WebRtc_Word16 * const farend,
-                             const WebRtc_Word16 * const nearendNoisy,
-                             const WebRtc_Word16 * const nearendClean,
-                             WebRtc_Word16 * const output)
-{
-    int i, j;
-
-    WebRtc_UWord32 xfaSum;
-    WebRtc_UWord32 dfaNoisySum;
-    WebRtc_UWord32 echoEst32Gained;
-    WebRtc_UWord32 tmpU32;
-
-    WebRtc_Word32 tmp32no1;
-    WebRtc_Word32 tmp32no2;
-    WebRtc_Word32 echoEst32[PART_LEN1];
-
-    WebRtc_UWord16 xfa[PART_LEN1];
-    WebRtc_UWord16 dfaNoisy[PART_LEN1];
-    WebRtc_UWord16 dfaClean[PART_LEN1];
-    WebRtc_UWord16* ptrDfaClean = dfaClean;
-
-    int outCFFT;
-
-    WebRtc_Word16 fft[PART_LEN4];
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-    WebRtc_Word16 postFft[PART_LEN4];
-#else
-    WebRtc_Word16 postFft[PART_LEN2];
-#endif
-    WebRtc_Word16 dfwReal[PART_LEN1];
-    WebRtc_Word16 dfwImag[PART_LEN1];
-    WebRtc_Word16 xfwReal[PART_LEN1];
-    WebRtc_Word16 xfwImag[PART_LEN1];
-    WebRtc_Word16 efwReal[PART_LEN1];
-    WebRtc_Word16 efwImag[PART_LEN1];
-    WebRtc_Word16 hnl[PART_LEN1];
-    WebRtc_Word16 numPosCoef;
-    WebRtc_Word16 nlpGain;
-    WebRtc_Word16 delay, diff, diffMinusOne;
-    WebRtc_Word16 tmp16no1;
-    WebRtc_Word16 tmp16no2;
-#ifdef AECM_WITH_ABS_APPROX
-    WebRtc_Word16 maxValue;
-    WebRtc_Word16 minValue;
-#endif
-    WebRtc_Word16 mu;
-    WebRtc_Word16 supGain;
-    WebRtc_Word16 zeros32, zeros16;
-    WebRtc_Word16 zerosDBufNoisy, zerosDBufClean, zerosXBuf;
-    WebRtc_Word16 resolutionDiff, qDomainDiff;
-
-#ifdef ARM_WINM_LOG_
-    DWORD temp;
-    static int flag0 = 0;
-    __int64 freq, start, end, diff__;
-    unsigned int milliseconds;
-#endif
-
-#ifdef AECM_WITH_ABS_APPROX
-    WebRtc_UWord16 alpha, beta;
-#endif
-
-    // Determine startup state. There are three states:
-    // (0) the first CONV_LEN blocks
-    // (1) another CONV_LEN blocks
-    // (2) the rest
-
-    if (aecm->startupState < 2)
-    {
-        aecm->startupState = (aecm->totCount >= CONV_LEN) + (aecm->totCount >= CONV_LEN2);
-    }
-    // END: Determine startup state
-
-    // Buffer near and far end signals
-    memcpy(aecm->xBuf + PART_LEN, farend, sizeof(WebRtc_Word16) * PART_LEN);
-    memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(WebRtc_Word16) * PART_LEN);
-    if (nearendClean != NULL)
-    {
-        memcpy(aecm->dBufClean + PART_LEN, nearendClean, sizeof(WebRtc_Word16) * PART_LEN);
-    }
-    // TODO(bjornv): Will be removed in final version.
-#ifdef VAD_DATA
-    fwrite(aecm->xBuf, sizeof(WebRtc_Word16), PART_LEN, aecm->far_file);
-#endif
-
-#ifdef AECM_DYNAMIC_Q
-    tmp16no1 = WebRtcSpl_MaxAbsValueW16(aecm->dBufNoisy, PART_LEN2);
-    tmp16no2 = WebRtcSpl_MaxAbsValueW16(aecm->xBuf, PART_LEN2);
-    zerosDBufNoisy = WebRtcSpl_NormW16(tmp16no1);
-    zerosXBuf = WebRtcSpl_NormW16(tmp16no2);
-#else
-    zerosDBufNoisy = 0;
-    zerosXBuf = 0;
-#endif
-    aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
-    aecm->dfaNoisyQDomain = zerosDBufNoisy;
-
-    if (nearendClean != NULL)
-    {
-#ifdef AECM_DYNAMIC_Q
-        tmp16no1 = WebRtcSpl_MaxAbsValueW16(aecm->dBufClean, PART_LEN2);
-        zerosDBufClean = WebRtcSpl_NormW16(tmp16no1);
-#else
-        zerosDBufClean = 0;
-#endif
-        aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
-        aecm->dfaCleanQDomain = zerosDBufClean;
-    } else
-    {
-        zerosDBufClean = zerosDBufNoisy;
-        aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
-        aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-
-    // FFT of noisy near end signal
-    for (i = 0; i < PART_LEN; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-        // Window near end
-        fft[j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((aecm->dBufNoisy[i]
-                        << zerosDBufNoisy), kSqrtHanning[i], 14);
-        fft[PART_LEN2 + j] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(
-                (aecm->dBufNoisy[PART_LEN + i] << zerosDBufNoisy),
-                kSqrtHanning[PART_LEN - i], 14);
-        // Inserting zeros in imaginary parts
-        fft[j + 1] = 0;
-        fft[PART_LEN2 + j + 1] = 0;
-    }
-
-    // Fourier transformation of near end signal.
-    // The result is scaled with 1/PART_LEN2, that is, the result is in Q(-6) for PART_LEN = 32
-
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-    outCFFT = WebRtcSpl_ComplexFFT2(fft, postFft, PART_LEN_SHIFT, 1);
-
-    // The imaginary part has to switch sign
-    for(i = 1; i < PART_LEN2-1;)
-    {
-        postFft[i] = -postFft[i];
-        i += 2;
-        postFft[i] = -postFft[i];
-        i += 2;
-    }
-#else
-    WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
-    outCFFT = WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
-
-    // Take only the first PART_LEN2 samples
-    for (i = 0; i < PART_LEN2; i++)
-    {
-        postFft[i] = fft[i];
-    }
-    // The imaginary part has to switch sign
-    for (i = 1; i < PART_LEN2;)
-    {
-        postFft[i] = -postFft[i];
-        i += 2;
-    }
-#endif
-
-    // Extract imaginary and real part, calculate the magnitude for all frequency bins
-    dfwImag[0] = 0;
-    dfwImag[PART_LEN] = 0;
-    dfwReal[0] = postFft[0];
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-    dfwReal[PART_LEN] = postFft[PART_LEN2];
-#else
-    dfwReal[PART_LEN] = fft[PART_LEN2];
-#endif
-    dfaNoisy[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[0]);
-    dfaNoisy[PART_LEN] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[PART_LEN]);
-    dfaNoisySum = (WebRtc_UWord32)(dfaNoisy[0]);
-    dfaNoisySum += (WebRtc_UWord32)(dfaNoisy[PART_LEN]);
-
-    for (i = 1; i < PART_LEN; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-        dfwReal[i] = postFft[j];
-        dfwImag[i] = postFft[j + 1];
-
-        if (dfwReal[i] == 0 || dfwImag[i] == 0)
-        {
-            dfaNoisy[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[i] + dfwImag[i]);
-        } else
-        {
-            // Approximation for magnitude of complex fft output
-            // magn = sqrt(real^2 + imag^2)
-            // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
-            //
-            // The parameters alpha and beta are stored in Q15
-
-            tmp16no1 = WEBRTC_SPL_ABS_W16(postFft[j]);
-            tmp16no2 = WEBRTC_SPL_ABS_W16(postFft[j + 1]);
-
-#ifdef AECM_WITH_ABS_APPROX
-            if(tmp16no1 > tmp16no2)
-            {
-                maxValue = tmp16no1;
-                minValue = tmp16no2;
-            } else
-            {
-                maxValue = tmp16no2;
-                minValue = tmp16no1;
-            }
-
-            // Magnitude in Q-6
-            if ((maxValue >> 2) > minValue)
-            {
-                alpha = kAlpha1;
-                beta = kBeta1;
-            } else if ((maxValue >> 1) > minValue)
-            {
-                alpha = kAlpha2;
-                beta = kBeta2;
-            } else
-            {
-                alpha = kAlpha3;
-                beta = kBeta3;
-            }
-            tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(maxValue, alpha, 15);
-            tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(minValue, beta, 15);
-            dfaNoisy[i] = (WebRtc_UWord16)tmp16no1 + (WebRtc_UWord16)tmp16no2;
-#else
-            tmp32no1 = WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1);
-            tmp32no2 = WEBRTC_SPL_MUL_16_16(tmp16no2, tmp16no2);
-            tmp32no2 = WEBRTC_SPL_ADD_SAT_W32(tmp32no1, tmp32no2);
-            tmp32no1 = WebRtcSpl_Sqrt(tmp32no2);
-            dfaNoisy[i] = (WebRtc_UWord16)tmp32no1;
-#endif
-        }
-        dfaNoisySum += (WebRtc_UWord32)dfaNoisy[i];
-    }
-    // END: FFT of noisy near end signal
-
-    if (nearendClean == NULL)
-    {
-        ptrDfaClean = dfaNoisy;
-    } else
-    {
-        // FFT of clean near end signal
-        for (i = 0; i < PART_LEN; i++)
-        {
-            j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-            // Window near end
-            fft[j]
-                    = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((aecm->dBufClean[i] << zerosDBufClean), kSqrtHanning[i], 14);
-            fft[PART_LEN2 + j]
-                    = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((aecm->dBufClean[PART_LEN + i] << zerosDBufClean), kSqrtHanning[PART_LEN - i], 14);
-            // Inserting zeros in imaginary parts
-            fft[j + 1] = 0;
-            fft[PART_LEN2 + j + 1] = 0;
-        }
-
-        // Fourier transformation of near end signal.
-        // The result is scaled with 1/PART_LEN2, that is, in Q(-6) for PART_LEN = 32
-
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-        outCFFT = WebRtcSpl_ComplexFFT2(fft, postFft, PART_LEN_SHIFT, 1);
-
-        // The imaginary part has to switch sign
-        for(i = 1; i < PART_LEN2-1;)
-        {
-            postFft[i] = -postFft[i];
-            i += 2;
-            postFft[i] = -postFft[i];
-            i += 2;
-        }
-#else
-        WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
-        outCFFT = WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
-
-        // Take only the first PART_LEN2 samples
-        for (i = 0; i < PART_LEN2; i++)
-        {
-            postFft[i] = fft[i];
-        }
-        // The imaginary part has to switch sign
-        for (i = 1; i < PART_LEN2;)
-        {
-            postFft[i] = -postFft[i];
-            i += 2;
-        }
-#endif
-
-        // Extract imaginary and real part, calculate the magnitude for all frequency bins
-        dfwImag[0] = 0;
-        dfwImag[PART_LEN] = 0;
-        dfwReal[0] = postFft[0];
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-        dfwReal[PART_LEN] = postFft[PART_LEN2];
-#else
-        dfwReal[PART_LEN] = fft[PART_LEN2];
-#endif
-        dfaClean[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[0]);
-        dfaClean[PART_LEN] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[PART_LEN]);
-
-        for (i = 1; i < PART_LEN; i++)
-        {
-            j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-            dfwReal[i] = postFft[j];
-            dfwImag[i] = postFft[j + 1];
-
-            if (dfwReal[i] == 0 || dfwImag[i] == 0)
-            {
-                dfaClean[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(dfwReal[i] + dfwImag[i]);
-            } else
-            {
-                // Approximation for magnitude of complex fft output
-                // magn = sqrt(real^2 + imag^2)
-                // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
-                //
-                // The parameters alpha and beta are stored in Q15
-
-                tmp16no1 = WEBRTC_SPL_ABS_W16(postFft[j]);
-                tmp16no2 = WEBRTC_SPL_ABS_W16(postFft[j + 1]);
-
-#ifdef AECM_WITH_ABS_APPROX
-                if(tmp16no1 > tmp16no2)
-                {
-                    maxValue = tmp16no1;
-                    minValue = tmp16no2;
-                } else
-                {
-                    maxValue = tmp16no2;
-                    minValue = tmp16no1;
-                }
-
-                // Magnitude in Q-6
-                if ((maxValue >> 2) > minValue)
-                {
-                    alpha = kAlpha1;
-                    beta = kBeta1;
-                } else if ((maxValue >> 1) > minValue)
-                {
-                    alpha = kAlpha2;
-                    beta = kBeta2;
-                } else
-                {
-                    alpha = kAlpha3;
-                    beta = kBeta3;
-                }
-                tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(maxValue, alpha, 15);
-                tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(minValue, beta, 15);
-                dfaClean[i] = (WebRtc_UWord16)tmp16no1 + (WebRtc_UWord16)tmp16no2;
-#else
-                tmp32no1 = WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1);
-                tmp32no2 = WEBRTC_SPL_MUL_16_16(tmp16no2, tmp16no2);
-                tmp32no2 = WEBRTC_SPL_ADD_SAT_W32(tmp32no1, tmp32no2);
-                tmp32no1 = WebRtcSpl_Sqrt(tmp32no2);
-                dfaClean[i] = (WebRtc_UWord16)tmp32no1;
-#endif
-            }
-        }
-    }
-    // END: FFT of clean near end signal
-
-    // FFT of far end signal
-    for (i = 0; i < PART_LEN; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-        // Window farend
-        fft[j]
-                = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((aecm->xBuf[i] << zerosXBuf), kSqrtHanning[i], 14);
-        fft[PART_LEN2 + j]
-                = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT((aecm->xBuf[PART_LEN + i] << zerosXBuf), kSqrtHanning[PART_LEN - i], 14);
-        // Inserting zeros in imaginary parts
-        fft[j + 1] = 0;
-        fft[PART_LEN2 + j + 1] = 0;
-    }
-    // Fourier transformation of far end signal.
-    // The result is scaled with 1/PART_LEN2, that is the result is in Q(-6) for PART_LEN = 32
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-    outCFFT = WebRtcSpl_ComplexFFT2(fft, postFft, PART_LEN_SHIFT, 1);
-
-    // The imaginary part has to switch sign
-    for(i = 1; i < PART_LEN2-1;)
-    {
-        postFft[i] = -postFft[i];
-        i += 2;
-        postFft[i] = -postFft[i];
-        i += 2;
-    }
-#else
-    WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
-    outCFFT = WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
-
-    // Take only the first PART_LEN2 samples
-    for (i = 0; i < PART_LEN2; i++)
-    {
-        postFft[i] = fft[i];
-    }
-    // The imaginary part has to switch sign
-    for (i = 1; i < PART_LEN2;)
-    {
-        postFft[i] = -postFft[i];
-        i += 2;
-    }
-#endif
-
-    // Extract imaginary and real part, calculate the magnitude for all frequency bins
-    xfwImag[0] = 0;
-    xfwImag[PART_LEN] = 0;
-    xfwReal[0] = postFft[0];
-#if (defined ARM_WINM) || (defined ARM9E_GCC) || (defined ANDROID_AECOPT)
-    xfwReal[PART_LEN] = postFft[PART_LEN2];
-#else
-    xfwReal[PART_LEN] = fft[PART_LEN2];
-#endif
-    xfa[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(xfwReal[0]);
-    xfa[PART_LEN] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(xfwReal[PART_LEN]);
-    xfaSum = (WebRtc_UWord32)(xfa[0]) + (WebRtc_UWord32)(xfa[PART_LEN]);
-
-    for (i = 1; i < PART_LEN; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i,1);
-        xfwReal[i] = postFft[j];
-        xfwImag[i] = postFft[j + 1];
-
-        if (xfwReal[i] == 0 || xfwImag[i] == 0)
-        {
-            xfa[i] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(xfwReal[i] + xfwImag[i]);
-        } else
-        {
-            // Approximation for magnitude of complex fft output
-            // magn = sqrt(real^2 + imag^2)
-            // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
-            //
-            // The parameters alpha and beta are stored in Q15
-
-            tmp16no1 = WEBRTC_SPL_ABS_W16(postFft[j]);
-            tmp16no2 = WEBRTC_SPL_ABS_W16(postFft[j + 1]);
-
-#ifdef AECM_WITH_ABS_APPROX
-            if(tmp16no1 > xfwImag[i])
-            {
-                maxValue = tmp16no1;
-                minValue = tmp16no2;
-            } else
-            {
-                maxValue = tmp16no2;
-                minValue = tmp16no1;
-            }
-            // Magnitude in Q-6
-            if ((maxValue >> 2) > minValue)
-            {
-                alpha = kAlpha1;
-                beta = kBeta1;
-            } else if ((maxValue >> 1) > minValue)
-            {
-                alpha = kAlpha2;
-                beta = kBeta2;
-            } else
-            {
-                alpha = kAlpha3;
-                beta = kBeta3;
-            }
-            tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(maxValue, alpha, 15);
-            tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(minValue, beta, 15);
-            xfa[i] = (WebRtc_UWord16)tmp16no1 + (WebRtc_UWord16)tmp16no2;
-#else
-            tmp32no1 = WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1);
-            tmp32no2 = WEBRTC_SPL_MUL_16_16(tmp16no2, tmp16no2);
-            tmp32no2 = WEBRTC_SPL_ADD_SAT_W32(tmp32no1, tmp32no2);
-            tmp32no1 = WebRtcSpl_Sqrt(tmp32no2);
-            xfa[i] = (WebRtc_UWord16)tmp32no1;
-#endif
-        }
-        xfaSum += (WebRtc_UWord32)xfa[i];
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-    // END: FFT of far end signal
-
-    // Get the delay
-
-    // Fixed delay estimation
-    // input: dfaFIX, xfaFIX in Q-stages
-    // output: delay in Q0
-    //
-    // comment on the fixed point accuracy of estimate_delayFIX
-    // -> due to rounding the fixed point variables xfa and dfa contain a lot more zeros
-    // than the corresponding floating point variables this results in big differences
-    // between the floating point and the fixed point logarithmic spectra for small values
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-
-    // Save far-end history and estimate delay
-    delay = WebRtcAecm_EstimateDelay(aecm, xfa, dfaNoisy, zerosXBuf);
-
-    if (aecm->fixedDelay >= 0)
-    {
-        // Use fixed delay
-        delay = aecm->fixedDelay;
-    }
-
-    aecm->currentDelay = delay;
-
-    if ((aecm->delayOffsetFlag) & (aecm->startupState > 0)) // If delay compensation is on
-    {
-        // If the delay estimate changed from previous block, update the offset
-        if ((aecm->currentDelay != aecm->previousDelay) & !aecm->currentDelay
-                & !aecm->previousDelay)
-        {
-            aecm->delayAdjust += (aecm->currentDelay - aecm->previousDelay);
-        }
-        // Compensate with the offset estimate
-        aecm->currentDelay -= aecm->delayAdjust;
-        aecm->previousDelay = delay;
-    }
-
-    diff = aecm->delHistoryPos - aecm->currentDelay;
-    if (diff < 0)
-    {
-        diff = diff + MAX_DELAY;
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-
-    // END: Get the delay
-
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-    // Calculate log(energy) and update energy threshold levels
-    WebRtcAecm_CalcEnergies(aecm, diff, dfaNoisySum, echoEst32);
-
-    // Calculate stepsize
-    mu = WebRtcAecm_CalcStepSize(aecm);
-
-    // Update counters
-    aecm->totCount++;
-    aecm->lastDelayUpdateCount++;
-
-    // This is the channel estimation algorithm.
-    // It is base on NLMS but has a variable step length, which was calculated above.
-    WebRtcAecm_UpdateChannel(aecm, dfaNoisy, diff, mu, echoEst32);
-    WebRtcAecm_DelayCompensation(aecm);
-    supGain = WebRtcAecm_CalcSuppressionGain(aecm);
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-
-    // Calculate Wiener filter hnl[]
-    numPosCoef = 0;
-    diffMinusOne = diff - 1;
-    if (diff == 0)
-    {
-        diffMinusOne = MAX_DELAY;
-    }
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        // Far end signal through channel estimate in Q8
-        // How much can we shift right to preserve resolution
-        tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
-        aecm->echoFilt[i] += WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32no1, 50), 8);
-
-        zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
-        zeros16 = WebRtcSpl_NormW16(supGain) + 1;
-        if (zeros32 + zeros16 > 16)
-        {
-            // Multiplication is safe
-            // Result in Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+aecm->xfaQDomainBuf[diff])
-            echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i],
-                                                    (WebRtc_UWord16)supGain);
-            resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
-            resolutionDiff += (aecm->dfaCleanQDomain - aecm->xfaQDomainBuf[diff]);
-        } else
-        {
-            tmp16no1 = 17 - zeros32 - zeros16;
-            resolutionDiff = 14 + tmp16no1 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
-            resolutionDiff += (aecm->dfaCleanQDomain - aecm->xfaQDomainBuf[diff]);
-            if (zeros32 > tmp16no1)
-            {
-                echoEst32Gained = WEBRTC_SPL_UMUL_32_16((WebRtc_UWord32)aecm->echoFilt[i],
-                        (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W16(supGain,
-                                tmp16no1)); // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
-            } else
-            {
-                // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
-                echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
-                        (WebRtc_UWord32)WEBRTC_SPL_RSHIFT_W32(aecm->echoFilt[i], tmp16no1),
-                        (WebRtc_UWord16)supGain);
-            }
-        }
-
-        zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
-        if ((zeros16 < (aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld))
-                & (aecm->nearFilt[i]))
-        {
-            tmp16no1 = WEBRTC_SPL_SHIFT_W16(aecm->nearFilt[i], zeros16);
-            qDomainDiff = zeros16 - aecm->dfaCleanQDomain + aecm->dfaCleanQDomainOld;
-        } else
-        {
-            tmp16no1 = WEBRTC_SPL_SHIFT_W16(aecm->nearFilt[i], aecm->dfaCleanQDomain
-                                            - aecm->dfaCleanQDomainOld);
-            qDomainDiff = 0;
-        }
-        tmp16no2 = WEBRTC_SPL_SHIFT_W16(ptrDfaClean[i], qDomainDiff);
-        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no2 - tmp16no1, 1, 4);
-        tmp16no2 += tmp16no1;
-        zeros16 = WebRtcSpl_NormW16(tmp16no2);
-        if ((tmp16no2) & (-qDomainDiff > zeros16))
-        {
-            aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
-        } else
-        {
-            aecm->nearFilt[i] = WEBRTC_SPL_SHIFT_W16(tmp16no2, -qDomainDiff);
-        }
-
-        // Wiener filter coefficients, resulting hnl in Q14
-        if (echoEst32Gained == 0)
-        {
-            hnl[i] = ONE_Q14;
-        } else if (aecm->nearFilt[i] == 0)
-        {
-            hnl[i] = 0;
-        } else
-        {
-            // Multiply the suppression gain
-            // Rounding
-            echoEst32Gained += (WebRtc_UWord32)(aecm->nearFilt[i] >> 1);
-            tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained, (WebRtc_UWord16)aecm->nearFilt[i]);
-
-            // Current resolution is
-            // Q-(RESOLUTION_CHANNEL + RESOLUTION_SUPGAIN - max(0, 17 - zeros16 - zeros32))
-            // Make sure we are in Q14
-            tmp32no1 = (WebRtc_Word32)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
-            if (tmp32no1 > ONE_Q14)
-            {
-                hnl[i] = 0;
-            } else if (tmp32no1 < 0)
-            {
-                hnl[i] = ONE_Q14;
-            } else
-            {
-                // 1-echoEst/dfa
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-                hnl[i] = ONE_Q14 - (WebRtc_Word16)tmp32no1;
-                if (hnl[i] < 0)
-                {
-                    hnl[i] = 0;
-                }
-#else
-                hnl[i] = ((ONE_Q14 - (WebRtc_Word16)tmp32no1) > 0) ? (ONE_Q14 - (WebRtc_Word16)tmp32no1) : 0;
-#endif
-            }
-        }
-        if (hnl[i])
-        {
-            numPosCoef++;
-        }
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-
-    // Calculate NLP gain, result is in Q14
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        if (aecm->nlpFlag)
-        {
-            // Truncate values close to zero and one.
-            if (hnl[i] > NLP_COMP_HIGH)
-            {
-                hnl[i] = ONE_Q14;
-            } else if (hnl[i] < NLP_COMP_LOW)
-            {
-                hnl[i] = 0;
-            }
-
-            // Remove outliers
-            if (numPosCoef < 3)
-            {
-                nlpGain = 0;
-            } else
-            {
-                nlpGain = ONE_Q14;
-            }
-            // NLP
-            if ((hnl[i] == ONE_Q14) && (nlpGain == ONE_Q14))
-            {
-                hnl[i] = ONE_Q14;
-            } else
-            {
-                hnl[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(hnl[i], nlpGain, 14);
-            }
-        }
-
-        // multiply with Wiener coefficients
-        efwReal[i] = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfwReal[i], hnl[i],
-                                                                          14));
-        efwImag[i] = (WebRtc_Word16)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfwImag[i], hnl[i],
-                                                                          14));
-    }
-
-    if (aecm->cngMode == AecmTrue)
-    {
-        WebRtcAecm_ComfortNoise(aecm, ptrDfaClean, efwReal, efwImag, hnl);
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-
-#ifdef ARM_WINM_LOG_
-    // measure tick start
-    QueryPerformanceCounter((LARGE_INTEGER*)&start);
-#endif
-
-    // Synthesis
-    for (i = 1; i < PART_LEN; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-        fft[j] = efwReal[i];
-
-        // mirrored data, even
-        fft[PART_LEN4 - j] = efwReal[i];
-        fft[j + 1] = -efwImag[i];
-
-        //mirrored data, odd
-        fft[PART_LEN4 - (j - 1)] = efwImag[i];
-    }
-    fft[0] = efwReal[0];
-    fft[1] = -efwImag[0];
-
-    fft[PART_LEN2] = efwReal[PART_LEN];
-    fft[PART_LEN2 + 1] = -efwImag[PART_LEN];
-
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-    // inverse FFT, result should be scaled with outCFFT
-    WebRtcSpl_ComplexBitReverse(fft, PART_LEN_SHIFT);
-    outCFFT = WebRtcSpl_ComplexIFFT(fft, PART_LEN_SHIFT, 1);
-
-    //take only the real values and scale with outCFFT
-    for (i = 0; i < PART_LEN2; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W32(i, 1);
-        fft[i] = fft[j];
-    }
-#else
-    outCFFT = WebRtcSpl_ComplexIFFT2(fft, postFft, PART_LEN_SHIFT, 1);
-
-    //take only the real values and scale with outCFFT
-    for(i = 0, j = 0; i < PART_LEN2;)
-    {
-        fft[i] = postFft[j];
-        i += 1;
-        j += 2;
-        fft[i] = postFft[j];
-        i += 1;
-        j += 2;
-    }
-#endif
-
-    for (i = 0; i < PART_LEN; i++)
-    {
-        fft[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
-                fft[i],
-                kSqrtHanning[i],
-                14);
-        tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)fft[i],
-                outCFFT - aecm->dfaCleanQDomain);
-        fft[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
-                tmp32no1 + aecm->outBuf[i],
-                WEBRTC_SPL_WORD16_MIN);
-        output[i] = fft[i];
-
-        tmp32no1 = WEBRTC_SPL_MUL_16_16_RSFT(
-                fft[PART_LEN + i],
-                kSqrtHanning[PART_LEN - i],
-                14);
-        tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1,
-                outCFFT - aecm->dfaCleanQDomain);
-        aecm->outBuf[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(
-                WEBRTC_SPL_WORD16_MAX,
-                tmp32no1,
-                WEBRTC_SPL_WORD16_MIN);
-    }
-
-#ifdef ARM_WINM_LOG_
-    // measure tick end
-    QueryPerformanceCounter((LARGE_INTEGER*)&end);
-    diff__ = ((end - start) * 1000) / (freq/1000);
-    milliseconds = (unsigned int)(diff__ & 0xffffffff);
-    WriteFile (logFile, &milliseconds, sizeof(unsigned int), &temp, NULL);
-#endif
-    // Copy the current block to the old position (outBuf is shifted elsewhere)
-    memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
-    memcpy(aecm->dBufNoisy, aecm->dBufNoisy + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
-    if (nearendClean != NULL)
-    {
-        memcpy(aecm->dBufClean, aecm->dBufClean + PART_LEN, sizeof(WebRtc_Word16) * PART_LEN);
-    }
-}
-
-// Generate comfort noise and add to output signal.
-//
-// \param[in]     aecm     Handle of the AECM instance.
-// \param[in]     dfa     Absolute value of the nearend signal (Q[aecm->dfaQDomain]).
-// \param[in,out] outReal Real part of the output signal (Q[aecm->dfaQDomain]).
-// \param[in,out] outImag Imaginary part of the output signal (Q[aecm->dfaQDomain]).
-// \param[in]     lambda  Suppression gain with which to scale the noise level (Q14).
-//
-static void WebRtcAecm_ComfortNoise(AecmCore_t * const aecm, const WebRtc_UWord16 * const dfa,
-                                    WebRtc_Word16 * const outReal,
-                                    WebRtc_Word16 * const outImag,
-                                    const WebRtc_Word16 * const lambda)
-{
-    WebRtc_Word16 i;
-    WebRtc_Word16 tmp16;
-    WebRtc_Word32 tmp32;
-
-    WebRtc_Word16 randW16[PART_LEN];
-    WebRtc_Word16 uReal[PART_LEN1];
-    WebRtc_Word16 uImag[PART_LEN1];
-    WebRtc_Word32 outLShift32[PART_LEN1];
-    WebRtc_Word16 noiseRShift16[PART_LEN1];
-
-    WebRtc_Word16 shiftFromNearToNoise[PART_LEN1];
-    WebRtc_Word16 minTrackShift;
-    WebRtc_Word32 upper32;
-    WebRtc_Word32 lower32;
-
-    if (aecm->noiseEstCtr < 100)
-    {
-        // Track the minimum more quickly initially.
-        aecm->noiseEstCtr++;
-        minTrackShift = 7;
-    } else
-    {
-        minTrackShift = 9;
-    }
-
-    // Estimate noise power.
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        shiftFromNearToNoise[i] = aecm->noiseEstQDomain[i] - aecm->dfaCleanQDomain;
-
-        // Shift to the noise domain.
-        tmp32 = (WebRtc_Word32)dfa[i];
-        outLShift32[i] = WEBRTC_SPL_SHIFT_W32(tmp32, shiftFromNearToNoise[i]);
-
-        if (outLShift32[i] < aecm->noiseEst[i])
-        {
-            // Track the minimum.
-            aecm->noiseEst[i] += ((outLShift32[i] - aecm->noiseEst[i]) >> minTrackShift);
-        } else
-        {
-            // Ramp slowly upwards until we hit the minimum again.
-
-            // Avoid overflow.
-            if (aecm->noiseEst[i] < 2146435583)
-            {
-                // Store the fractional portion.
-                upper32 = (aecm->noiseEst[i] & 0xffff0000) >> 16;
-                lower32 = aecm->noiseEst[i] & 0x0000ffff;
-                upper32 = ((upper32 * 2049) >> 11);
-                lower32 = ((lower32 * 2049) >> 11);
-                aecm->noiseEst[i] = WEBRTC_SPL_ADD_SAT_W32(upper32 << 16, lower32);
-            }
-        }
-    }
-
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        tmp32 = WEBRTC_SPL_SHIFT_W32(aecm->noiseEst[i], -shiftFromNearToNoise[i]);
-        if (tmp32 > 32767)
-        {
-            tmp32 = 32767;
-            aecm->noiseEst[i] = WEBRTC_SPL_SHIFT_W32(tmp32, shiftFromNearToNoise[i]);
-        }
-        noiseRShift16[i] = (WebRtc_Word16)tmp32;
-
-        tmp16 = ONE_Q14 - lambda[i];
-        noiseRShift16[i]
-                = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16, noiseRShift16[i], 14);
-    }
-
-    // Generate a uniform random array on [0 2^15-1].
-    WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
-
-    // Generate noise according to estimated energy.
-    uReal[0] = 0; // Reject LF noise.
-    uImag[0] = 0;
-    for (i = 1; i < PART_LEN1; i++)
-    {
-        // Get a random index for the cos and sin tables over [0 359].
-        tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(359, randW16[i - 1], 15);
-
-        // Tables are in Q13.
-        uReal[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(noiseRShift16[i],
-                WebRtcSpl_kCosTable[tmp16], 13);
-        uImag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(-noiseRShift16[i],
-                WebRtcSpl_kSinTable[tmp16], 13);
-    }
-    uImag[PART_LEN] = 0;
-
-#if (!defined ARM_WINM) && (!defined ARM9E_GCC) && (!defined ANDROID_AECOPT)
-    for (i = 0; i < PART_LEN1; i++)
-    {
-        outReal[i] = WEBRTC_SPL_ADD_SAT_W16(outReal[i], uReal[i]);
-        outImag[i] = WEBRTC_SPL_ADD_SAT_W16(outImag[i], uImag[i]);
-    }
-#else
-    for (i = 0; i < PART_LEN1 -1; )
-    {
-        outReal[i] = WEBRTC_SPL_ADD_SAT_W16(outReal[i], uReal[i]);
-        outImag[i] = WEBRTC_SPL_ADD_SAT_W16(outImag[i], uImag[i]);
-        i++;
-
-        outReal[i] = WEBRTC_SPL_ADD_SAT_W16(outReal[i], uReal[i]);
-        outImag[i] = WEBRTC_SPL_ADD_SAT_W16(outImag[i], uImag[i]);
-        i++;
-    }
-    outReal[i] = WEBRTC_SPL_ADD_SAT_W16(outReal[i], uReal[i]);
-    outImag[i] = WEBRTC_SPL_ADD_SAT_W16(outImag[i], uImag[i]);
-#endif
-}
-
-void WebRtcAecm_BufferFarFrame(AecmCore_t * const aecm, const WebRtc_Word16 * const farend,
-                               const int farLen)
-{
-    int writeLen = farLen, writePos = 0;
-
-    // Check if the write position must be wrapped
-    while (aecm->farBufWritePos + writeLen > FAR_BUF_LEN)
-    {
-        // Write to remaining buffer space before wrapping
-        writeLen = FAR_BUF_LEN - aecm->farBufWritePos;
-        memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
-               sizeof(WebRtc_Word16) * writeLen);
-        aecm->farBufWritePos = 0;
-        writePos = writeLen;
-        writeLen = farLen - writeLen;
-    }
-
-    memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
-           sizeof(WebRtc_Word16) * writeLen);
-    aecm->farBufWritePos += writeLen;
-}
-
-void WebRtcAecm_FetchFarFrame(AecmCore_t * const aecm, WebRtc_Word16 * const farend,
-                              const int farLen, const int knownDelay)
-{
-    int readLen = farLen;
-    int readPos = 0;
-    int delayChange = knownDelay - aecm->lastKnownDelay;
-
-    aecm->farBufReadPos -= delayChange;
-
-    // Check if delay forces a read position wrap
-    while (aecm->farBufReadPos < 0)
-    {
-        aecm->farBufReadPos += FAR_BUF_LEN;
-    }
-    while (aecm->farBufReadPos > FAR_BUF_LEN - 1)
-    {
-        aecm->farBufReadPos -= FAR_BUF_LEN;
-    }
-
-    aecm->lastKnownDelay = knownDelay;
-
-    // Check if read position must be wrapped
-    while (aecm->farBufReadPos + readLen > FAR_BUF_LEN)
-    {
-
-        // Read from remaining buffer space before wrapping
-        readLen = FAR_BUF_LEN - aecm->farBufReadPos;
-        memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
-               sizeof(WebRtc_Word16) * readLen);
-        aecm->farBufReadPos = 0;
-        readPos = readLen;
-        readLen = farLen - readLen;
-    }
-    memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
-           sizeof(WebRtc_Word16) * readLen);
-    aecm->farBufReadPos += readLen;
-}
diff --git a/src/modules/audio_processing/agc/Android.mk b/src/modules/audio_processing/agc/Android.mk
new file mode 100644
index 0000000..546128d
--- /dev/null
+++ b/src/modules/audio_processing/agc/Android.mk
@@ -0,0 +1,40 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_agc
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := \
+    analog_agc.c \
+    digital_agc.c
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/agc/main/source/agc.gyp b/src/modules/audio_processing/agc/agc.gypi
similarity index 66%
rename from src/modules/audio_processing/agc/main/source/agc.gyp
rename to src/modules/audio_processing/agc/agc.gypi
index e28a4c8..78288b7 100644
--- a/src/modules/audio_processing/agc/main/source/agc.gyp
+++ b/src/modules/audio_processing/agc/agc.gypi
@@ -7,26 +7,23 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../../common_settings.gypi', # Common settings
-  ],
   'targets': [
     {
       'target_name': 'agc',
       'type': '<(library)',
       'dependencies': [
-        '../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
       ],
       'include_dirs': [
-        '../interface',
+        'interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'interface',
         ],
       },
       'sources': [
-        '../interface/gain_control.h',
+        'interface/gain_control.h',
         'analog_agc.c',
         'analog_agc.h',
         'digital_agc.c',
@@ -35,9 +32,3 @@
     },
   ],
 }
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/agc/main/source/analog_agc.c b/src/modules/audio_processing/agc/analog_agc.c
similarity index 98%
rename from src/modules/audio_processing/agc/main/source/analog_agc.c
rename to src/modules/audio_processing/agc/analog_agc.c
index e52fd66..40c5566 100644
--- a/src/modules/audio_processing/agc/main/source/analog_agc.c
+++ b/src/modules/audio_processing/agc/analog_agc.c
@@ -200,6 +200,10 @@
     /* apply slowly varying digital gain */
     if (stt->micVol > stt->maxAnalog)
     {
+        /* |maxLevel| is strictly >= |micVol|, so this condition should be
+         * satisfied here, ensuring there is no divide-by-zero. */
+        assert(stt->maxLevel > stt->maxAnalog);
+
         /* Q1 */
         tmp16 = (WebRtc_Word16)(stt->micVol - stt->maxAnalog);
         tmp32 = WEBRTC_SPL_MUL_16_16(GAIN_TBL_LEN - 1, tmp16);
@@ -921,7 +925,8 @@
         stt->Rxx16_vectorw32[stt->Rxx16pos] = Rxx16w32;
 
         /* Circular buffer */
-        if (++(stt->Rxx16pos) == RXX_BUFFER_LEN)
+        stt->Rxx16pos++;
+        if (stt->Rxx16pos == RXX_BUFFER_LEN)
         {
             stt->Rxx16pos = 0;
         }
@@ -1316,10 +1321,19 @@
     *outMicLevel = inMicLevel;
     inMicLevelTmp = inMicLevel;
 
-    memcpy(out, in_near, samples * sizeof(WebRtc_Word16));
+    // TODO(andrew): clearly we don't need input and output pointers...
+    //   Change the interface to take a shared input/output.
+    if (in_near != out)
+    {
+        // Only needed if they don't already point to the same place.
+        memcpy(out, in_near, samples * sizeof(WebRtc_Word16));
+    }
     if (stt->fs == 32000)
     {
-        memcpy(out_H, in_near_H, samples * sizeof(WebRtc_Word16));
+        if (in_near_H != out_H)
+        {
+            memcpy(out_H, in_near_H, samples * sizeof(WebRtc_Word16));
+        }
     }
 
 #ifdef AGC_DEBUG//test log
diff --git a/src/modules/audio_processing/agc/main/source/analog_agc.h b/src/modules/audio_processing/agc/analog_agc.h
similarity index 100%
rename from src/modules/audio_processing/agc/main/source/analog_agc.h
rename to src/modules/audio_processing/agc/analog_agc.h
diff --git a/src/modules/audio_processing/agc/main/source/digital_agc.c b/src/modules/audio_processing/agc/digital_agc.c
similarity index 96%
rename from src/modules/audio_processing/agc/main/source/digital_agc.c
rename to src/modules/audio_processing/agc/digital_agc.c
index 2966586..3b4b39b 100644
--- a/src/modules/audio_processing/agc/main/source/digital_agc.c
+++ b/src/modules/audio_processing/agc/digital_agc.c
@@ -12,11 +12,14 @@
  *
  */
 
+#include "digital_agc.h"
+
+#include <assert.h>
 #include <string.h>
 #ifdef AGC_DEBUG
 #include <stdio.h>
 #endif
-#include "digital_agc.h"
+
 #include "gain_control.h"
 
 // To generate the gaintable, copy&paste the following lines to a Matlab window:
@@ -33,7 +36,8 @@
 // zoom on;
 
 // Generator table for y=log2(1+e^x) in Q8.
-static const WebRtc_UWord16 kGenFuncTable[128] = {
+enum { kGenFuncTableSize = 128 };
+static const WebRtc_UWord16 kGenFuncTable[kGenFuncTableSize] = {
           256,   485,   786,  1126,  1484,  1849,  2217,  2586,
          2955,  3324,  3693,  4063,  4432,  4801,  5171,  5540,
          5909,  6279,  6648,  7017,  7387,  7756,  8125,  8495,
@@ -102,8 +106,9 @@
     //           = (compRatio-1)*digCompGaindB/compRatio
     tmp32no1 = WEBRTC_SPL_MUL_16_16(digCompGaindB, kCompRatio - 1);
     diffGain = WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
-    if (diffGain < 0)
+    if (diffGain < 0 || diffGain >= kGenFuncTableSize)
     {
+        assert(0);
         return -1;
     }
 
@@ -185,8 +190,15 @@
         numFIX -= WEBRTC_SPL_MUL_32_16((WebRtc_Word32)logApprox, diffGain); // Q14
 
         // Calculate ratio
-        // Shift numFIX as much as possible
-        zeros = WebRtcSpl_NormW32(numFIX);
+        // Shift |numFIX| as much as possible.
+        // Ensure we avoid wrap-around in |den| as well.
+        if (numFIX > (den >> 8))  // |den| is Q8.
+        {
+            zeros = WebRtcSpl_NormW32(numFIX);
+        } else
+        {
+            zeros = WebRtcSpl_NormW32(den) + 8;
+        }
         numFIX = WEBRTC_SPL_LSHIFT_W32(numFIX, zeros); // Q(14+zeros)
 
         // Shift den so we end up in Qy1
@@ -327,10 +339,18 @@
         return -1;
     }
 
-    memcpy(out, in_near, 10 * L * sizeof(WebRtc_Word16));
+    // TODO(andrew): again, we don't need input and output pointers...
+    if (in_near != out)
+    {
+        // Only needed if they don't already point to the same place.
+        memcpy(out, in_near, 10 * L * sizeof(WebRtc_Word16));
+    }
     if (FS == 32000)
     {
-        memcpy(out_H, in_near_H, 10 * L * sizeof(WebRtc_Word16));
+        if (in_near_H != out_H)
+        {
+            memcpy(out_H, in_near_H, 10 * L * sizeof(WebRtc_Word16));
+        }
     }
     // VAD for near end
     logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out, L * 10);
@@ -652,11 +672,9 @@
     WebRtc_Word16 buf2[4];
     WebRtc_Word16 HPstate;
     WebRtc_Word16 zeros, dB;
-    WebRtc_Word16 *buf1_ptr;
 
     // process in 10 sub frames of 1 ms (to save on memory)
     nrg = 0;
-    buf1_ptr = &buf1[0];
     HPstate = state->HPstate;
     for (subfr = 0; subfr < 10; subfr++)
     {
diff --git a/src/modules/audio_processing/agc/main/source/digital_agc.h b/src/modules/audio_processing/agc/digital_agc.h
similarity index 100%
rename from src/modules/audio_processing/agc/main/source/digital_agc.h
rename to src/modules/audio_processing/agc/digital_agc.h
diff --git a/src/modules/audio_processing/agc/main/interface/gain_control.h b/src/modules/audio_processing/agc/interface/gain_control.h
similarity index 100%
rename from src/modules/audio_processing/agc/main/interface/gain_control.h
rename to src/modules/audio_processing/agc/interface/gain_control.h
diff --git a/src/modules/audio_processing/agc/main/matlab/getGains.m b/src/modules/audio_processing/agc/main/matlab/getGains.m
deleted file mode 100644
index e0234b8..0000000
--- a/src/modules/audio_processing/agc/main/matlab/getGains.m
+++ /dev/null
@@ -1,32 +0,0 @@
-% Outputs a file for testing purposes. 
-%
-% Adjust the following parameters to suit. Their purpose becomes more clear on
-% viewing the gain plots.
-% MaxGain: Max gain in dB
-% MinGain: Min gain at overload (0 dBov) in dB
-% CompRatio: Compression ratio, essentially determines the slope of the gain
-%            function between the max and min gains
-% Knee: The smoothness of the transition to max gain (smaller is smoother)
-MaxGain = 5; MinGain = 0; CompRatio = 3; Knee = 1;
-
-% Compute gains
-zeros = 0:31; lvl = 2.^(1-zeros); 
-A = -10*log10(lvl) * (CompRatio - 1) / CompRatio;
-B = MaxGain - MinGain;
-gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B))))));
-fprintf(1, '\t%i, %i, %i, %i,\n', gains);
-
-% Save gains to file
-fid = fopen('gains', 'wb');
-if fid == -1
-	error(sprintf('Unable to open file %s', filename));
-	return
-end
-fwrite(fid, gains, 'int32');
-fclose(fid);
-
-% Plotting
-in = 10*log10(lvl); out = 20*log10(gains/65536);
-subplot(121); plot(in, out); axis([-60, 0, -5, 30]); grid on; xlabel('Input (dB)'); ylabel('Gain (dB)');
-subplot(122); plot(in, in+out); axis([-60, 0, -60, 10]); grid on; xlabel('Input (dB)'); ylabel('Output (dB)');
-zoom on;
diff --git a/src/modules/audio_processing/agc/main/source/Android.mk b/src/modules/audio_processing/agc/main/source/Android.mk
deleted file mode 100644
index e045839..0000000
--- a/src/modules/audio_processing/agc/main/source/Android.mk
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is generated by gyp; do not edit. This means you!
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_agc
-LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := analog_agc.c \
-    digital_agc.c
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR'
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-# Duplicate the static libraries to fix circular references
-LOCAL_STATIC_LIBRARIES += $(LOCAL_STATIC_LIBRARIES)
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/apm_tests.gypi b/src/modules/audio_processing/apm_tests.gypi
new file mode 100644
index 0000000..f9b21d2
--- /dev/null
+++ b/src/modules/audio_processing/apm_tests.gypi
@@ -0,0 +1,75 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'targets': [
+    {
+      'target_name': 'audioproc_unittest',
+      'type': 'executable',
+      'conditions': [
+        ['prefer_fixed_point==1', {
+          'defines': [ 'WEBRTC_APM_UNIT_TEST_FIXED_PROFILE' ],
+        }, {
+          'defines': [ 'WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE' ],
+        }],
+        ['enable_protobuf==1', {
+          'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
+        }],
+      ],
+      'dependencies': [
+        'audio_processing',
+        'audioproc_unittest_proto',
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+        '<(webrtc_root)/../test/test.gyp:test_support',
+        '<(webrtc_root)/../testing/gtest.gyp:gtest',
+      ],
+      'sources': [ 'test/unit_test.cc', ],
+    },
+    {
+      'target_name': 'audioproc_unittest_proto',
+      'type': 'static_library',
+      'sources': [ 'test/unittest.proto', ],
+      'variables': {
+        'proto_in_dir': 'test',
+        # Workaround to protect against gyp's pathname relativization when this
+        # file is included by modules.gyp.
+        'proto_out_protected': 'webrtc/audio_processing',
+        'proto_out_dir': '<(proto_out_protected)',
+      },
+      'includes': [ '../../build/protoc.gypi', ],
+    },
+  ],
+  'conditions': [
+    ['enable_protobuf==1', {
+      'targets': [
+        {
+          'target_name': 'audioproc',
+          'type': 'executable',
+          'dependencies': [
+            'audio_processing',
+            'audioproc_debug_proto',
+            '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+            '<(webrtc_root)/../testing/gtest.gyp:gtest',
+          ],
+          'sources': [ 'test/process_test.cc', ],
+        },
+        {
+          'target_name': 'unpack_aecdump',
+          'type': 'executable',
+          'dependencies': [
+            'audioproc_debug_proto',
+            '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+            '<(webrtc_root)/../third_party/google-gflags/google-gflags.gyp:google-gflags',
+          ],
+          'sources': [ 'test/unpack.cc', ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/src/modules/audio_processing/audio_buffer.cc b/src/modules/audio_processing/audio_buffer.cc
new file mode 100644
index 0000000..a7fb04d
--- /dev/null
+++ b/src/modules/audio_processing/audio_buffer.cc
@@ -0,0 +1,306 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_buffer.h"
+
+#include "signal_processing_library.h"
+
+namespace webrtc {
+namespace {
+
+enum {
+  kSamplesPer8kHzChannel = 80,
+  kSamplesPer16kHzChannel = 160,
+  kSamplesPer32kHzChannel = 320
+};
+
+void StereoToMono(const int16_t* left, const int16_t* right,
+                  int16_t* out, int samples_per_channel) {
+  assert(left != NULL && right != NULL && out != NULL);
+  for (int i = 0; i < samples_per_channel; i++) {
+    int32_t data32 = (static_cast<int32_t>(left[i]) +
+                      static_cast<int32_t>(right[i])) >> 1;
+
+    out[i] = WebRtcSpl_SatW32ToW16(data32);
+  }
+}
+}  // namespace
+
+struct AudioChannel {
+  AudioChannel() {
+    memset(data, 0, sizeof(data));
+  }
+
+  int16_t data[kSamplesPer32kHzChannel];
+};
+
+struct SplitAudioChannel {
+  SplitAudioChannel() {
+    memset(low_pass_data, 0, sizeof(low_pass_data));
+    memset(high_pass_data, 0, sizeof(high_pass_data));
+    memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
+    memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
+    memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
+    memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+  }
+
+  int16_t low_pass_data[kSamplesPer16kHzChannel];
+  int16_t high_pass_data[kSamplesPer16kHzChannel];
+
+  WebRtc_Word32 analysis_filter_state1[6];
+  WebRtc_Word32 analysis_filter_state2[6];
+  WebRtc_Word32 synthesis_filter_state1[6];
+  WebRtc_Word32 synthesis_filter_state2[6];
+};
+
+// TODO(andrew): check range of input parameters?
+AudioBuffer::AudioBuffer(int max_num_channels,
+                         int samples_per_channel)
+  : max_num_channels_(max_num_channels),
+    num_channels_(0),
+    num_mixed_channels_(0),
+    num_mixed_low_pass_channels_(0),
+    data_was_mixed_(false),
+    samples_per_channel_(samples_per_channel),
+    samples_per_split_channel_(samples_per_channel),
+    reference_copied_(false),
+    activity_(AudioFrame::kVadUnknown),
+    is_muted_(false),
+    data_(NULL),
+    channels_(NULL),
+    split_channels_(NULL),
+    mixed_channels_(NULL),
+    mixed_low_pass_channels_(NULL),
+    low_pass_reference_channels_(NULL) {
+  if (max_num_channels_ > 1) {
+    channels_.reset(new AudioChannel[max_num_channels_]);
+    mixed_channels_.reset(new AudioChannel[max_num_channels_]);
+    mixed_low_pass_channels_.reset(new AudioChannel[max_num_channels_]);
+  }
+  low_pass_reference_channels_.reset(new AudioChannel[max_num_channels_]);
+
+  if (samples_per_channel_ == kSamplesPer32kHzChannel) {
+    split_channels_.reset(new SplitAudioChannel[max_num_channels_]);
+    samples_per_split_channel_ = kSamplesPer16kHzChannel;
+  }
+}
+
+AudioBuffer::~AudioBuffer() {}
+
+int16_t* AudioBuffer::data(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  if (data_ != NULL) {
+    return data_;
+  }
+
+  return channels_[channel].data;
+}
+
+int16_t* AudioBuffer::low_pass_split_data(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  if (split_channels_.get() == NULL) {
+    return data(channel);
+  }
+
+  return split_channels_[channel].low_pass_data;
+}
+
+int16_t* AudioBuffer::high_pass_split_data(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  if (split_channels_.get() == NULL) {
+    return NULL;
+  }
+
+  return split_channels_[channel].high_pass_data;
+}
+
+int16_t* AudioBuffer::mixed_data(int channel) const {
+  assert(channel >= 0 && channel < num_mixed_channels_);
+
+  return mixed_channels_[channel].data;
+}
+
+int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
+  assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
+
+  return mixed_low_pass_channels_[channel].data;
+}
+
+int16_t* AudioBuffer::low_pass_reference(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  if (!reference_copied_) {
+    return NULL;
+  }
+
+  return low_pass_reference_channels_[channel].data;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state1(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  return split_channels_[channel].analysis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state2(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  return split_channels_[channel].analysis_filter_state2;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state1(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  return split_channels_[channel].synthesis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state2(int channel) const {
+  assert(channel >= 0 && channel < num_channels_);
+  return split_channels_[channel].synthesis_filter_state2;
+}
+
+void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
+  activity_ = activity;
+}
+
+AudioFrame::VADActivity AudioBuffer::activity() const {
+  return activity_;
+}
+
+bool AudioBuffer::is_muted() const {
+  return is_muted_;
+}
+
+int AudioBuffer::num_channels() const {
+  return num_channels_;
+}
+
+int AudioBuffer::samples_per_channel() const {
+  return samples_per_channel_;
+}
+
+int AudioBuffer::samples_per_split_channel() const {
+  return samples_per_split_channel_;
+}
+
+// TODO(andrew): Do deinterleaving and mixing in one step?
+void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
+  assert(frame->_audioChannel <= max_num_channels_);
+  assert(frame->_payloadDataLengthInSamples ==  samples_per_channel_);
+
+  num_channels_ = frame->_audioChannel;
+  data_was_mixed_ = false;
+  num_mixed_channels_ = 0;
+  num_mixed_low_pass_channels_ = 0;
+  reference_copied_ = false;
+  activity_ = frame->_vadActivity;
+  is_muted_ = false;
+  if (frame->_energy == 0) {
+    is_muted_ = true;
+  }
+
+  if (num_channels_ == 1) {
+    // We can get away with a pointer assignment in this case.
+    data_ = frame->_payloadData;
+    return;
+  }
+
+  int16_t* interleaved = frame->_payloadData;
+  for (int i = 0; i < num_channels_; i++) {
+    int16_t* deinterleaved = channels_[i].data;
+    int interleaved_idx = i;
+    for (int j = 0; j < samples_per_channel_; j++) {
+      deinterleaved[j] = interleaved[interleaved_idx];
+      interleaved_idx += num_channels_;
+    }
+  }
+}
+
+void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
+  assert(frame->_audioChannel == num_channels_);
+  assert(frame->_payloadDataLengthInSamples == samples_per_channel_);
+  frame->_vadActivity = activity_;
+
+  if (!data_changed) {
+    return;
+  }
+
+  if (num_channels_ == 1) {
+    if (data_was_mixed_) {
+      memcpy(frame->_payloadData,
+             channels_[0].data,
+             sizeof(int16_t) * samples_per_channel_);
+    } else {
+      // These should point to the same buffer in this case.
+      assert(data_ == frame->_payloadData);
+    }
+
+    return;
+  }
+
+  int16_t* interleaved = frame->_payloadData;
+  for (int i = 0; i < num_channels_; i++) {
+    int16_t* deinterleaved = channels_[i].data;
+    int interleaved_idx = i;
+    for (int j = 0; j < samples_per_channel_; j++) {
+      interleaved[interleaved_idx] = deinterleaved[j];
+      interleaved_idx += num_channels_;
+    }
+  }
+}
+
+// TODO(andrew): would be good to support the no-mix case with pointer
+// assignment.
+// TODO(andrew): handle mixing to multiple channels?
+void AudioBuffer::Mix(int num_mixed_channels) {
+  // We currently only support the stereo to mono case.
+  assert(num_channels_ == 2);
+  assert(num_mixed_channels == 1);
+
+  StereoToMono(channels_[0].data,
+               channels_[1].data,
+               channels_[0].data,
+               samples_per_channel_);
+
+  num_channels_ = num_mixed_channels;
+  data_was_mixed_ = true;
+}
+
+void AudioBuffer::CopyAndMix(int num_mixed_channels) {
+  // We currently only support the stereo to mono case.
+  assert(num_channels_ == 2);
+  assert(num_mixed_channels == 1);
+
+  StereoToMono(channels_[0].data,
+               channels_[1].data,
+               mixed_channels_[0].data,
+               samples_per_channel_);
+
+  num_mixed_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
+  // We currently only support the stereo to mono case.
+  assert(num_channels_ == 2);
+  assert(num_mixed_channels == 1);
+
+  StereoToMono(low_pass_split_data(0),
+               low_pass_split_data(1),
+               mixed_low_pass_channels_[0].data,
+               samples_per_split_channel_);
+
+  num_mixed_low_pass_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyLowPassToReference() {
+  reference_copied_ = true;
+  for (int i = 0; i < num_channels_; i++) {
+    memcpy(low_pass_reference_channels_[i].data,
+           low_pass_split_data(i),
+           sizeof(int16_t) * samples_per_split_channel_);
+  }
+}
+}  // namespace webrtc
diff --git a/src/modules/audio_processing/audio_buffer.h b/src/modules/audio_processing/audio_buffer.h
new file mode 100644
index 0000000..87d6972
--- /dev/null
+++ b/src/modules/audio_processing/audio_buffer.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+
+#include "module_common_types.h"
+#include "scoped_ptr.h"
+#include "typedefs.h"
+
+namespace webrtc {
+
+struct AudioChannel;
+struct SplitAudioChannel;
+
+class AudioBuffer {
+ public:
+  AudioBuffer(int max_num_channels, int samples_per_channel);
+  virtual ~AudioBuffer();
+
+  int num_channels() const;
+  int samples_per_channel() const;
+  int samples_per_split_channel() const;
+
+  int16_t* data(int channel) const;
+  int16_t* low_pass_split_data(int channel) const;
+  int16_t* high_pass_split_data(int channel) const;
+  int16_t* mixed_data(int channel) const;
+  int16_t* mixed_low_pass_data(int channel) const;
+  int16_t* low_pass_reference(int channel) const;
+
+  int32_t* analysis_filter_state1(int channel) const;
+  int32_t* analysis_filter_state2(int channel) const;
+  int32_t* synthesis_filter_state1(int channel) const;
+  int32_t* synthesis_filter_state2(int channel) const;
+
+  void set_activity(AudioFrame::VADActivity activity);
+  AudioFrame::VADActivity activity() const;
+
+  bool is_muted() const;
+
+  void DeinterleaveFrom(AudioFrame* audioFrame);
+  void InterleaveTo(AudioFrame* audioFrame) const;
+  // If |data_changed| is false, only the non-audio data members will be copied
+  // to |frame|.
+  void InterleaveTo(AudioFrame* frame, bool data_changed) const;
+  void Mix(int num_mixed_channels);
+  void CopyAndMix(int num_mixed_channels);
+  void CopyAndMixLowPass(int num_mixed_channels);
+  void CopyLowPassToReference();
+
+ private:
+  const int max_num_channels_;
+  int num_channels_;
+  int num_mixed_channels_;
+  int num_mixed_low_pass_channels_;
+  // Whether the original data was replaced with mixed data.
+  bool data_was_mixed_;
+  const int samples_per_channel_;
+  int samples_per_split_channel_;
+  bool reference_copied_;
+  AudioFrame::VADActivity activity_;
+  bool is_muted_;
+
+  int16_t* data_;
+  scoped_array<AudioChannel> channels_;
+  scoped_array<SplitAudioChannel> split_channels_;
+  scoped_array<AudioChannel> mixed_channels_;
+  // TODO(andrew): improve this; we don't need the full 32 kHz space here.
+  scoped_array<AudioChannel> mixed_low_pass_channels_;
+  scoped_array<AudioChannel> low_pass_reference_channels_;
+};
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
diff --git a/src/modules/audio_processing/audio_processing.gypi b/src/modules/audio_processing/audio_processing.gypi
new file mode 100644
index 0000000..2a22a79
--- /dev/null
+++ b/src/modules/audio_processing/audio_processing.gypi
@@ -0,0 +1,91 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'targets': [
+    {
+      'target_name': 'audio_processing',
+      'type': '<(library)',
+      'conditions': [
+        ['prefer_fixed_point==1', {
+          'dependencies': [ 'ns_fix' ],
+          'defines': [ 'WEBRTC_NS_FIXED' ],
+        }, {
+          'dependencies': [ 'ns' ],
+          'defines': [ 'WEBRTC_NS_FLOAT' ],
+        }],
+        ['enable_protobuf==1', {
+          'dependencies': [ 'audioproc_debug_proto' ],
+          'defines': [ 'WEBRTC_AUDIOPROC_DEBUG_DUMP' ],
+        }],
+      ],
+      'dependencies': [
+        'aec',
+        'aecm',
+        'agc',
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        '<(webrtc_root)/common_audio/common_audio.gyp:vad',
+        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+      ],
+      'include_dirs': [
+        'interface',
+        '../interface',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          'interface',
+          '../interface',
+        ],
+      },
+      'sources': [
+        'interface/audio_processing.h',
+        'audio_buffer.cc',
+        'audio_buffer.h',
+        'audio_processing_impl.cc',
+        'audio_processing_impl.h',
+        'echo_cancellation_impl.cc',
+        'echo_cancellation_impl.h',
+        'echo_control_mobile_impl.cc',
+        'echo_control_mobile_impl.h',
+        'gain_control_impl.cc',
+        'gain_control_impl.h',
+        'high_pass_filter_impl.cc',
+        'high_pass_filter_impl.h',
+        'level_estimator_impl.cc',
+        'level_estimator_impl.h',
+        'noise_suppression_impl.cc',
+        'noise_suppression_impl.h',
+        'splitting_filter.cc',
+        'splitting_filter.h',
+        'processing_component.cc',
+        'processing_component.h',
+        'voice_detection_impl.cc',
+        'voice_detection_impl.h',
+      ],
+    },
+  ],
+  'conditions': [
+    ['enable_protobuf==1', {
+      'targets': [
+        {
+          'target_name': 'audioproc_debug_proto',
+          'type': 'static_library',
+          'sources': [ 'debug.proto', ],
+          'variables': {
+            'proto_in_dir': '.',
+            # Workaround to protect against gyp's pathname relativization when
+            # this file is included by modules.gyp.
+            'proto_out_protected': 'webrtc/audio_processing',
+            'proto_out_dir': '<(proto_out_protected)',
+          },
+          'includes': [ '../../build/protoc.gypi', ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.cc b/src/modules/audio_processing/audio_processing_impl.cc
similarity index 66%
rename from src/modules/audio_processing/main/source/audio_processing_impl.cc
rename to src/modules/audio_processing/audio_processing_impl.cc
index 6440e36..9702e9e 100644
--- a/src/modules/audio_processing/main/source/audio_processing_impl.cc
+++ b/src/modules/audio_processing/audio_processing_impl.cc
@@ -10,36 +10,32 @@
 
 #include "audio_processing_impl.h"
 
-#include <cassert>
-
-#include "module_common_types.h"
-
-#include "critical_section_wrapper.h"
-#include "file_wrapper.h"
+#include <assert.h>
 
 #include "audio_buffer.h"
+#include "critical_section_wrapper.h"
 #include "echo_cancellation_impl.h"
 #include "echo_control_mobile_impl.h"
+#include "file_wrapper.h"
 #include "high_pass_filter_impl.h"
 #include "gain_control_impl.h"
 #include "level_estimator_impl.h"
+#include "module_common_types.h"
 #include "noise_suppression_impl.h"
 #include "processing_component.h"
 #include "splitting_filter.h"
 #include "voice_detection_impl.h"
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+// Files generated at build-time by the protobuf compiler.
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
+
 namespace webrtc {
-namespace {
-
-enum Events {
-  kInitializeEvent,
-  kRenderEvent,
-  kCaptureEvent
-};
-
-const char kMagicNumber[] = "#!vqetrace1.2";
-}  // namespace
-
 AudioProcessing* AudioProcessing::Create(int id) {
   /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
              webrtc::kTraceAudioProcessing,
@@ -68,18 +64,21 @@
       level_estimator_(NULL),
       noise_suppression_(NULL),
       voice_detection_(NULL),
-      debug_file_(FileWrapper::Create()),
       crit_(CriticalSectionWrapper::CreateCriticalSection()),
       render_audio_(NULL),
       capture_audio_(NULL),
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+      debug_file_(FileWrapper::Create()),
+      event_msg_(new audioproc::Event()),
+#endif
       sample_rate_hz_(kSampleRate16kHz),
       split_sample_rate_hz_(kSampleRate16kHz),
       samples_per_channel_(sample_rate_hz_ / 100),
       stream_delay_ms_(0),
       was_stream_delay_set_(false),
-      num_render_input_channels_(1),
-      num_capture_input_channels_(1),
-      num_capture_output_channels_(1) {
+      num_reverse_channels_(1),
+      num_input_channels_(1),
+      num_output_channels_(1) {
 
   echo_cancellation_ = new EchoCancellationImpl(this);
   component_list_.push_back(echo_cancellation_);
@@ -111,21 +110,21 @@
     component_list_.pop_front();
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     debug_file_->CloseFile();
   }
-  delete debug_file_;
-  debug_file_ = NULL;
+#endif
 
   delete crit_;
   crit_ = NULL;
 
-  if (render_audio_ != NULL) {
+  if (render_audio_) {
     delete render_audio_;
     render_audio_ = NULL;
   }
 
-  if (capture_audio_ != NULL) {
+  if (capture_audio_) {
     delete capture_audio_;
     capture_audio_ = NULL;
   }
@@ -155,9 +154,9 @@
     capture_audio_ = NULL;
   }
 
-  render_audio_ = new AudioBuffer(num_render_input_channels_,
+  render_audio_ = new AudioBuffer(num_reverse_channels_,
                                   samples_per_channel_);
-  capture_audio_ = new AudioBuffer(num_capture_input_channels_,
+  capture_audio_ = new AudioBuffer(num_input_channels_,
                                    samples_per_channel_);
 
   was_stream_delay_set_ = false;
@@ -171,6 +170,15 @@
     }
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  if (debug_file_->Open()) {
+    int err = WriteInitMessage();
+    if (err != kNoError) {
+      return err;
+    }
+  }
+#endif
+
   return kNoError;
 }
 
@@ -205,13 +213,13 @@
     return kBadParameterError;
   }
 
-  num_render_input_channels_ = channels;
+  num_reverse_channels_ = channels;
 
   return InitializeLocked();
 }
 
 int AudioProcessingImpl::num_reverse_channels() const {
-  return num_render_input_channels_;
+  return num_reverse_channels_;
 }
 
 int AudioProcessingImpl::set_num_channels(
@@ -231,18 +239,18 @@
     return kBadParameterError;
   }
 
-  num_capture_input_channels_ = input_channels;
-  num_capture_output_channels_ = output_channels;
+  num_input_channels_ = input_channels;
+  num_output_channels_ = output_channels;
 
   return InitializeLocked();
 }
 
 int AudioProcessingImpl::num_input_channels() const {
-  return num_capture_input_channels_;
+  return num_input_channels_;
 }
 
 int AudioProcessingImpl::num_output_channels() const {
-  return num_capture_output_channels_;
+  return num_output_channels_;
 }
 
 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
@@ -253,12 +261,11 @@
     return kNullPointerError;
   }
 
-  if (frame->_frequencyInHz !=
-      static_cast<WebRtc_UWord32>(sample_rate_hz_)) {
+  if (frame->_frequencyInHz != sample_rate_hz_) {
     return kBadSampleRateError;
   }
 
-  if (frame->_audioChannel != num_capture_input_channels_) {
+  if (frame->_audioChannel != num_input_channels_) {
     return kBadNumberChannelsError;
   }
 
@@ -266,45 +273,31 @@
     return kBadDataLengthError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
-    WebRtc_UWord8 event = kCaptureEvent;
-    if (!debug_file_->Write(&event, sizeof(event))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_frequencyInHz,
-                                   sizeof(frame->_frequencyInHz))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_audioChannel,
-                                   sizeof(frame->_audioChannel))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
-        sizeof(frame->_payloadDataLengthInSamples))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(frame->_payloadData,
-        sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
-        frame->_audioChannel)) {
-      return kFileError;
-    }
+    event_msg_->set_type(audioproc::Event::STREAM);
+    audioproc::Stream* msg = event_msg_->mutable_stream();
+    const size_t data_size = sizeof(int16_t) *
+                             frame->_payloadDataLengthInSamples *
+                             frame->_audioChannel;
+    msg->set_input_data(frame->_payloadData, data_size);
+    msg->set_delay(stream_delay_ms_);
+    msg->set_drift(echo_cancellation_->stream_drift_samples());
+    msg->set_level(gain_control_->stream_analog_level());
   }
+#endif
 
   capture_audio_->DeinterleaveFrom(frame);
 
   // TODO(ajm): experiment with mixing and AEC placement.
-  if (num_capture_output_channels_ < num_capture_input_channels_) {
-    capture_audio_->Mix(num_capture_output_channels_);
-
-    frame->_audioChannel = num_capture_output_channels_;
+  if (num_output_channels_ < num_input_channels_) {
+    capture_audio_->Mix(num_output_channels_);
+    frame->_audioChannel = num_output_channels_;
   }
 
-  if (sample_rate_hz_ == kSampleRate32kHz) {
-    for (int i = 0; i < num_capture_input_channels_; i++) {
+  bool data_changed = stream_data_changed();
+  if (analysis_needed(data_changed)) {
+    for (int i = 0; i < num_output_channels_; i++) {
       // Split into a low and high band.
       SplittingFilterAnalysis(capture_audio_->data(i),
                               capture_audio_->low_pass_split_data(i),
@@ -354,13 +347,8 @@
     return err;
   }
 
-  //err = level_estimator_->ProcessCaptureAudio(capture_audio_);
-  //if (err != kNoError) {
-  //  return err;
-  //}
-
-  if (sample_rate_hz_ == kSampleRate32kHz) {
-    for (int i = 0; i < num_capture_output_channels_; i++) {
+  if (synthesis_needed(data_changed)) {
+    for (int i = 0; i < num_output_channels_; i++) {
       // Recombine low and high bands.
       SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                                capture_audio_->high_pass_split_data(i),
@@ -370,8 +358,29 @@
     }
   }
 
-  capture_audio_->InterleaveTo(frame);
+  // The level estimator operates on the recombined data.
+  err = level_estimator_->ProcessStream(capture_audio_);
+  if (err != kNoError) {
+    return err;
+  }
 
+  capture_audio_->InterleaveTo(frame, data_changed);
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  if (debug_file_->Open()) {
+    audioproc::Stream* msg = event_msg_->mutable_stream();
+    const size_t data_size = sizeof(int16_t) *
+                             frame->_payloadDataLengthInSamples *
+                             frame->_audioChannel;
+    msg->set_output_data(frame->_payloadData, data_size);
+    err = WriteMessageToDebugFile();
+    if (err != kNoError) {
+      return err;
+    }
+  }
+#endif
+
+  was_stream_delay_set_ = false;
   return kNoError;
 }
 
@@ -383,12 +392,11 @@
     return kNullPointerError;
   }
 
-  if (frame->_frequencyInHz !=
-      static_cast<WebRtc_UWord32>(sample_rate_hz_)) {
+  if (frame->_frequencyInHz != sample_rate_hz_) {
     return kBadSampleRateError;
   }
 
-  if (frame->_audioChannel != num_render_input_channels_) {
+  if (frame->_audioChannel != num_reverse_channels_) {
     return kBadNumberChannelsError;
   }
 
@@ -396,39 +404,26 @@
     return kBadDataLengthError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
-    WebRtc_UWord8 event = kRenderEvent;
-    if (!debug_file_->Write(&event, sizeof(event))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_frequencyInHz,
-                                   sizeof(frame->_frequencyInHz))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_audioChannel,
-                                   sizeof(frame->_audioChannel))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
-        sizeof(frame->_payloadDataLengthInSamples))) {
-      return kFileError;
-    }
-
-    if (!debug_file_->Write(frame->_payloadData,
-        sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
-        frame->_audioChannel)) {
-      return kFileError;
+    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
+    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+    const size_t data_size = sizeof(int16_t) *
+                             frame->_payloadDataLengthInSamples *
+                             frame->_audioChannel;
+    msg->set_data(frame->_payloadData, data_size);
+    err = WriteMessageToDebugFile();
+    if (err != kNoError) {
+      return err;
     }
   }
+#endif
 
   render_audio_->DeinterleaveFrom(frame);
 
   // TODO(ajm): turn the splitting filter into a component?
   if (sample_rate_hz_ == kSampleRate32kHz) {
-    for (int i = 0; i < num_render_input_channels_; i++) {
+    for (int i = 0; i < num_reverse_channels_; i++) {
       // Split into low and high band.
       SplittingFilterAnalysis(render_audio_->data(i),
                               render_audio_->low_pass_split_data(i),
@@ -454,12 +449,6 @@
     return err;
   }
 
-  //err = level_estimator_->AnalyzeReverseStream(render_audio_);
-  //if (err != kNoError) {
-  //  return err;
-  //}
-
-  was_stream_delay_set_ = false;
   return err;  // TODO(ajm): this is for returning warnings; necessary?
 }
 
@@ -496,6 +485,7 @@
     return kNullPointerError;
   }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // Stop any ongoing recording.
   if (debug_file_->Open()) {
     if (debug_file_->CloseFile() == -1) {
@@ -508,35 +498,30 @@
     return kFileError;
   }
 
-  if (debug_file_->WriteText("%s\n", kMagicNumber) == -1) {
-    debug_file_->CloseFile();
-    return kFileError;
+  int err = WriteInitMessage();
+  if (err != kNoError) {
+    return err;
   }
-
-  // TODO(ajm): should we do this? If so, we need the number of channels etc.
-  // Record the default sample rate.
-  WebRtc_UWord8 event = kInitializeEvent;
-  if (!debug_file_->Write(&event, sizeof(event))) {
-    return kFileError;
-  }
-
-  if (!debug_file_->Write(&sample_rate_hz_, sizeof(sample_rate_hz_))) {
-    return kFileError;
-  }
-
   return kNoError;
+#else
+  return kUnsupportedFunctionError;
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
 int AudioProcessingImpl::StopDebugRecording() {
   CriticalSectionScoped crit_scoped(*crit_);
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
   if (debug_file_->Open()) {
     if (debug_file_->CloseFile() == -1) {
       return kFileError;
     }
   }
-
   return kNoError;
+#else
+  return kUnsupportedFunctionError;
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
 EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
@@ -567,61 +552,6 @@
   return voice_detection_;
 }
 
-WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
-    WebRtc_UWord32& bytes_remaining, WebRtc_UWord32& position) const {
-  if (version == NULL) {
-    /*WEBRTC_TRACE(webrtc::kTraceError,
-               webrtc::kTraceAudioProcessing,
-               -1,
-               "Null version pointer");*/
-    return kNullPointerError;
-  }
-  memset(&version[position], 0, bytes_remaining);
-
-  WebRtc_Word8 my_version[] = "AudioProcessing 1.0.0";
-  // Includes null termination.
-  WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
-  if (bytes_remaining < length) {
-    /*WEBRTC_TRACE(webrtc::kTraceError,
-               webrtc::kTraceAudioProcessing,
-               -1,
-               "Buffer of insufficient length");*/
-    return kBadParameterError;
-  }
-  memcpy(&version[position], my_version, length);
-  bytes_remaining -= length;
-  position += length;
-
-  std::list<ProcessingComponent*>::const_iterator it;
-  for (it = component_list_.begin(); it != component_list_.end(); it++) {
-    char component_version[256];
-    strcpy(component_version, "\n");
-    int err = (*it)->get_version(&component_version[1],
-                                 sizeof(component_version) - 1);
-    if (err != kNoError) {
-      return err;
-    }
-    if (strncmp(&component_version[1], "\0", 1) == 0) {
-      // Assume empty if first byte is NULL.
-      continue;
-    }
-
-    length = static_cast<WebRtc_UWord32>(strlen(component_version));
-    if (bytes_remaining < length) {
-      /*WEBRTC_TRACE(webrtc::kTraceError,
-                 webrtc::kTraceAudioProcessing,
-                 -1,
-                 "Buffer of insufficient length");*/
-      return kBadParameterError;
-    }
-    memcpy(&version[position], component_version, length);
-    bytes_remaining -= length;
-    position += length;
-  }
-
-  return kNoError;
-}
-
 WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
   CriticalSectionScoped crit_scoped(*crit_);
   /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
@@ -633,4 +563,90 @@
 
   return kNoError;
 }
+
+bool AudioProcessingImpl::stream_data_changed() const {
+  int enabled_count = 0;
+  std::list<ProcessingComponent*>::const_iterator it;
+  for (it = component_list_.begin(); it != component_list_.end(); it++) {
+    if ((*it)->is_component_enabled()) {
+      enabled_count++;
+    }
+  }
+
+  // Data is unchanged if no components are enabled, or if only level_estimator_
+  // or voice_detection_ is enabled.
+  if (enabled_count == 0) {
+    return false;
+  } else if (enabled_count == 1) {
+    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
+      return false;
+    }
+  } else if (enabled_count == 2) {
+    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
+  return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
+}
+
+bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
+  if (!stream_data_changed && !voice_detection_->is_enabled()) {
+    // Only level_estimator_ is enabled.
+    return false;
+  } else if (sample_rate_hz_ == kSampleRate32kHz) {
+    // Something besides level_estimator_ is enabled, and we have super-wb.
+    return true;
+  }
+  return false;
+}
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+int AudioProcessingImpl::WriteMessageToDebugFile() {
+  int32_t size = event_msg_->ByteSize();
+  if (size <= 0) {
+    return kUnspecifiedError;
+  }
+#if defined(WEBRTC_BIG_ENDIAN)
+  // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
+  //            pretty safe in assuming little-endian.
+#endif
+
+  if (!event_msg_->SerializeToString(&event_str_)) {
+    return kUnspecifiedError;
+  }
+
+  // Write message preceded by its size.
+  if (!debug_file_->Write(&size, sizeof(int32_t))) {
+    return kFileError;
+  }
+  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
+    return kFileError;
+  }
+
+  event_msg_->Clear();
+
+  return 0;
+}
+
+int AudioProcessingImpl::WriteInitMessage() {
+  event_msg_->set_type(audioproc::Event::INIT);
+  audioproc::Init* msg = event_msg_->mutable_init();
+  msg->set_sample_rate(sample_rate_hz_);
+  msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
+  msg->set_num_input_channels(num_input_channels_);
+  msg->set_num_output_channels(num_output_channels_);
+  msg->set_num_reverse_channels(num_reverse_channels_);
+
+  int err = WriteMessageToDebugFile();
+  if (err != kNoError) {
+    return err;
+  }
+
+  return kNoError;
+}
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }  // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.h b/src/modules/audio_processing/audio_processing_impl.h
similarity index 79%
rename from src/modules/audio_processing/main/source/audio_processing_impl.h
rename to src/modules/audio_processing/audio_processing_impl.h
index 9707bde..c1ab476 100644
--- a/src/modules/audio_processing/main/source/audio_processing_impl.h
+++ b/src/modules/audio_processing/audio_processing_impl.h
@@ -11,17 +11,19 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
 
-#include <list>
-
 #include "audio_processing.h"
 
-namespace webrtc {
-class CriticalSectionWrapper;
-class FileWrapper;
+#include <list>
+#include <string>
 
+#include "scoped_ptr.h"
+
+namespace webrtc {
 class AudioBuffer;
+class CriticalSectionWrapper;
 class EchoCancellationImpl;
 class EchoControlMobileImpl;
+class FileWrapper;
 class GainControlImpl;
 class HighPassFilterImpl;
 class LevelEstimatorImpl;
@@ -29,6 +31,14 @@
 class ProcessingComponent;
 class VoiceDetectionImpl;
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+namespace audioproc {
+
+class Event;
+
+}  // namespace audioproc
+#endif
+
 class AudioProcessingImpl : public AudioProcessing {
  public:
   enum {
@@ -70,12 +80,13 @@
   virtual VoiceDetection* voice_detection() const;
 
   // Module methods.
-  virtual WebRtc_Word32 Version(WebRtc_Word8* version,
-                              WebRtc_UWord32& remainingBufferInBytes,
-                              WebRtc_UWord32& position) const;
   virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
 
  private:
+  bool stream_data_changed() const;
+  bool synthesis_needed(bool stream_data_changed) const;
+  bool analysis_needed(bool stream_data_changed) const;
+
   int id_;
 
   EchoCancellationImpl* echo_cancellation_;
@@ -87,12 +98,18 @@
   VoiceDetectionImpl* voice_detection_;
 
   std::list<ProcessingComponent*> component_list_;
-
-  FileWrapper* debug_file_;
   CriticalSectionWrapper* crit_;
-
   AudioBuffer* render_audio_;
   AudioBuffer* capture_audio_;
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // TODO(andrew): make this more graceful. Ideally we would split this stuff
+  // out into a separate class with an "enabled" and "disabled" implementation.
+  int WriteMessageToDebugFile();
+  int WriteInitMessage();
+  scoped_ptr<FileWrapper> debug_file_;
+  scoped_ptr<audioproc::Event> event_msg_;  // Protobuf message.
+  std::string event_str_;  // Memory for protobuf serialization.
+#endif
 
   int sample_rate_hz_;
   int split_sample_rate_hz_;
@@ -100,9 +117,9 @@
   int stream_delay_ms_;
   bool was_stream_delay_set_;
 
-  int num_render_input_channels_;
-  int num_capture_input_channels_;
-  int num_capture_output_channels_;
+  int num_reverse_channels_;
+  int num_input_channels_;
+  int num_output_channels_;
 };
 }  // namespace webrtc
 
diff --git a/src/modules/audio_processing/debug.proto b/src/modules/audio_processing/debug.proto
new file mode 100644
index 0000000..4b3a163
--- /dev/null
+++ b/src/modules/audio_processing/debug.proto
@@ -0,0 +1,37 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+message Init {
+  optional int32 sample_rate = 1;
+  optional int32 device_sample_rate = 2;
+  optional int32 num_input_channels = 3;
+  optional int32 num_output_channels = 4;
+  optional int32 num_reverse_channels = 5;
+}
+
+message ReverseStream {
+  optional bytes data = 1;
+}
+
+message Stream {
+  optional bytes input_data = 1;
+  optional bytes output_data = 2;
+  optional int32 delay = 3;
+  optional sint32 drift = 4;
+  optional int32 level = 5;
+}
+
+message Event {
+  enum Type {
+    INIT = 0;
+    REVERSE_STREAM = 1;
+    STREAM = 2;
+  }
+
+  required Type type = 1;
+
+  optional Init init = 2;
+  optional ReverseStream reverse_stream = 3;
+  optional Stream stream = 4;
+}
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.cc b/src/modules/audio_processing/echo_cancellation_impl.cc
similarity index 90%
rename from src/modules/audio_processing/main/source/echo_cancellation_impl.cc
rename to src/modules/audio_processing/echo_cancellation_impl.cc
index 886d5f1..61940b1 100644
--- a/src/modules/audio_processing/main/source/echo_cancellation_impl.cc
+++ b/src/modules/audio_processing/echo_cancellation_impl.cc
@@ -66,7 +66,8 @@
     device_sample_rate_hz_(48000),
     stream_drift_samples_(0),
     was_stream_drift_set_(false),
-    stream_has_echo_(false) {}
+    stream_has_echo_(false),
+    delay_logging_enabled_(false) {}
 
 EchoCancellationImpl::~EchoCancellationImpl() {}
 
@@ -283,6 +284,39 @@
   return stream_has_echo_;
 }
 
+int EchoCancellationImpl::enable_delay_logging(bool enable) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  delay_logging_enabled_ = enable;
+  return Configure();
+}
+
+bool EchoCancellationImpl::is_delay_logging_enabled() const {
+  return delay_logging_enabled_;
+}
+
+// TODO(bjornv): How should we handle the multi-channel case?
+int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  if (median == NULL) {
+    return apm_->kNullPointerError;
+  }
+  if (std == NULL) {
+    return apm_->kNullPointerError;
+  }
+
+  if (!is_component_enabled() || !delay_logging_enabled_) {
+    return apm_->kNotEnabledError;
+  }
+
+  Handle* my_handle = static_cast<Handle*>(handle(0));
+  if (WebRtcAec_GetDelayMetrics(my_handle, median, std) !=
+      apm_->kNoError) {
+    return GetHandleError(my_handle);
+  }
+
+  return apm_->kNoError;
+}
+
 int EchoCancellationImpl::Initialize() {
   int err = ProcessingComponent::Initialize();
   if (err != apm_->kNoError || !is_component_enabled()) {
@@ -332,6 +366,7 @@
   config.metricsMode = metrics_enabled_;
   config.nlpMode = MapSetting(suppression_level_);
   config.skewMode = drift_compensation_enabled_;
+  config.delay_logging = delay_logging_enabled_;
 
   return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
 }
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.h b/src/modules/audio_processing/echo_cancellation_impl.h
similarity index 93%
rename from src/modules/audio_processing/main/source/echo_cancellation_impl.h
rename to src/modules/audio_processing/echo_cancellation_impl.h
index 380a698..a483a3a 100644
--- a/src/modules/audio_processing/main/source/echo_cancellation_impl.h
+++ b/src/modules/audio_processing/echo_cancellation_impl.h
@@ -29,6 +29,8 @@
 
   // EchoCancellation implementation.
   virtual bool is_enabled() const;
+  virtual int device_sample_rate_hz() const;
+  virtual int stream_drift_samples() const;
 
   // ProcessingComponent implementation.
   virtual int Initialize();
@@ -40,15 +42,16 @@
   virtual int enable_drift_compensation(bool enable);
   virtual bool is_drift_compensation_enabled() const;
   virtual int set_device_sample_rate_hz(int rate);
-  virtual int device_sample_rate_hz() const;
   virtual int set_stream_drift_samples(int drift);
-  virtual int stream_drift_samples() const;
   virtual int set_suppression_level(SuppressionLevel level);
   virtual SuppressionLevel suppression_level() const;
   virtual int enable_metrics(bool enable);
   virtual bool are_metrics_enabled() const;
   virtual bool stream_has_echo() const;
   virtual int GetMetrics(Metrics* metrics);
+  virtual int enable_delay_logging(bool enable);
+  virtual bool is_delay_logging_enabled() const;
+  virtual int GetDelayMetrics(int* median, int* std);
 
   // ProcessingComponent implementation.
   virtual void* CreateHandle() const;
@@ -66,6 +69,7 @@
   int stream_drift_samples_;
   bool was_stream_drift_set_;
   bool stream_has_echo_;
+  bool delay_logging_enabled_;
 };
 }  // namespace webrtc
 
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc b/src/modules/audio_processing/echo_control_mobile_impl.cc
similarity index 74%
rename from src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
rename to src/modules/audio_processing/echo_control_mobile_impl.cc
index 1cd2502..ff15255 100644
--- a/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
+++ b/src/modules/audio_processing/echo_control_mobile_impl.cc
@@ -11,6 +11,7 @@
 #include "echo_control_mobile_impl.h"
 
 #include <cassert>
+#include <cstring>
 
 #include "critical_section_wrapper.h"
 #include "echo_control_mobile.h"
@@ -44,26 +45,37 @@
   switch (err) {
     case AECM_UNSUPPORTED_FUNCTION_ERROR:
       return AudioProcessing::kUnsupportedFunctionError;
+    case AECM_NULL_POINTER_ERROR:
+      return AudioProcessing::kNullPointerError;
     case AECM_BAD_PARAMETER_ERROR:
       return AudioProcessing::kBadParameterError;
     case AECM_BAD_PARAMETER_WARNING:
       return AudioProcessing::kBadStreamParameterWarning;
     default:
-      // AECMOBFIX_UNSPECIFIED_ERROR
-      // AECMOBFIX_UNINITIALIZED_ERROR
-      // AECMOBFIX_NULL_POINTER_ERROR
+      // AECM_UNSPECIFIED_ERROR
+      // AECM_UNINITIALIZED_ERROR
       return AudioProcessing::kUnspecifiedError;
   }
 }
 }  // namespace
 
+size_t EchoControlMobile::echo_path_size_bytes() {
+    return WebRtcAecm_echo_path_size_bytes();
+}
+
 EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessingImpl* apm)
   : ProcessingComponent(apm),
     apm_(apm),
     routing_mode_(kSpeakerphone),
-    comfort_noise_enabled_(true) {}
+    comfort_noise_enabled_(true),
+    external_echo_path_(NULL) {}
 
-EchoControlMobileImpl::~EchoControlMobileImpl() {}
+EchoControlMobileImpl::~EchoControlMobileImpl() {
+    if (external_echo_path_ != NULL) {
+      delete [] external_echo_path_;
+      external_echo_path_ = NULL;
+    }
+}
 
 int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
   if (!is_component_enabled()) {
@@ -181,6 +193,48 @@
   return comfort_noise_enabled_;
 }
 
+int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
+                                       size_t size_bytes) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  if (echo_path == NULL) {
+    return apm_->kNullPointerError;
+  }
+  if (size_bytes != echo_path_size_bytes()) {
+    // Size mismatch
+    return apm_->kBadParameterError;
+  }
+
+  if (external_echo_path_ == NULL) {
+    external_echo_path_ = new unsigned char[size_bytes];
+  }
+  memcpy(external_echo_path_, echo_path, size_bytes);
+
+  return Initialize();
+}
+
+int EchoControlMobileImpl::GetEchoPath(void* echo_path,
+                                       size_t size_bytes) const {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  if (echo_path == NULL) {
+    return apm_->kNullPointerError;
+  }
+  if (size_bytes != echo_path_size_bytes()) {
+    // Size mismatch
+    return apm_->kBadParameterError;
+  }
+  if (!is_component_enabled()) {
+    return apm_->kNotEnabledError;
+  }
+
+  // Get the echo path from the first channel
+  Handle* my_handle = static_cast<Handle*>(handle(0));
+  if (WebRtcAecm_GetEchoPath(my_handle, echo_path, size_bytes) != 0) {
+      return GetHandleError(my_handle);
+  }
+
+  return apm_->kNoError;
+}
+
 int EchoControlMobileImpl::Initialize() {
   if (!is_component_enabled()) {
     return apm_->kNoError;
@@ -197,7 +251,7 @@
 int EchoControlMobileImpl::get_version(char* version,
                                        int version_len_bytes) const {
   if (WebRtcAecm_get_version(version, version_len_bytes) != 0) {
-      return apm_->kBadParameterError;
+    return apm_->kBadParameterError;
   }
 
   return apm_->kNoError;
@@ -219,10 +273,20 @@
 }
 
 int EchoControlMobileImpl::InitializeHandle(void* handle) const {
-  return WebRtcAecm_Init(static_cast<Handle*>(handle),
-                         apm_->sample_rate_hz(),
-                         48000); // Dummy value. This isn't actually
-                                 // required by AECM.
+  assert(handle != NULL);
+  Handle* my_handle = static_cast<Handle*>(handle);
+  if (WebRtcAecm_Init(my_handle, apm_->sample_rate_hz()) != 0) {
+    return GetHandleError(my_handle);
+  }
+  if (external_echo_path_ != NULL) {
+    if (WebRtcAecm_InitEchoPath(my_handle,
+                                external_echo_path_,
+                                echo_path_size_bytes()) != 0) {
+      return GetHandleError(my_handle);
+    }
+  }
+
+  return apm_->kNoError;
 }
 
 int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.h b/src/modules/audio_processing/echo_control_mobile_impl.h
similarity index 91%
rename from src/modules/audio_processing/main/source/echo_control_mobile_impl.h
rename to src/modules/audio_processing/echo_control_mobile_impl.h
index 2fd6248..6314e66 100644
--- a/src/modules/audio_processing/main/source/echo_control_mobile_impl.h
+++ b/src/modules/audio_processing/echo_control_mobile_impl.h
@@ -41,6 +41,8 @@
   virtual RoutingMode routing_mode() const;
   virtual int enable_comfort_noise(bool enable);
   virtual bool is_comfort_noise_enabled() const;
+  virtual int SetEchoPath(const void* echo_path, size_t size_bytes);
+  virtual int GetEchoPath(void* echo_path, size_t size_bytes) const;
 
   // ProcessingComponent implementation.
   virtual void* CreateHandle() const;
@@ -53,6 +55,7 @@
   const AudioProcessingImpl* apm_;
   RoutingMode routing_mode_;
   bool comfort_noise_enabled_;
+  unsigned char* external_echo_path_;
 };
 }  // namespace webrtc
 
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.cc b/src/modules/audio_processing/gain_control_impl.cc
similarity index 100%
rename from src/modules/audio_processing/main/source/gain_control_impl.cc
rename to src/modules/audio_processing/gain_control_impl.cc
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.h b/src/modules/audio_processing/gain_control_impl.h
similarity index 99%
rename from src/modules/audio_processing/main/source/gain_control_impl.h
rename to src/modules/audio_processing/gain_control_impl.h
index a11d606..7b6987e 100644
--- a/src/modules/audio_processing/main/source/gain_control_impl.h
+++ b/src/modules/audio_processing/gain_control_impl.h
@@ -36,12 +36,12 @@
 
   // GainControl implementation.
   virtual bool is_enabled() const;
+  virtual int stream_analog_level();
 
  private:
   // GainControl implementation.
   virtual int Enable(bool enable);
   virtual int set_stream_analog_level(int level);
-  virtual int stream_analog_level();
   virtual int set_mode(Mode mode);
   virtual Mode mode() const;
   virtual int set_target_level_dbfs(int level);
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.cc b/src/modules/audio_processing/high_pass_filter_impl.cc
similarity index 100%
rename from src/modules/audio_processing/main/source/high_pass_filter_impl.cc
rename to src/modules/audio_processing/high_pass_filter_impl.cc
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.h b/src/modules/audio_processing/high_pass_filter_impl.h
similarity index 100%
rename from src/modules/audio_processing/main/source/high_pass_filter_impl.h
rename to src/modules/audio_processing/high_pass_filter_impl.h
diff --git a/src/modules/audio_processing/main/interface/audio_processing.h b/src/modules/audio_processing/interface/audio_processing.h
similarity index 89%
rename from src/modules/audio_processing/main/interface/audio_processing.h
rename to src/modules/audio_processing/interface/audio_processing.h
index dc9c232..ee4d06f 100644
--- a/src/modules/audio_processing/main/interface/audio_processing.h
+++ b/src/modules/audio_processing/interface/audio_processing.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_INTERFACE_AUDIO_PROCESSING_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_INTERFACE_AUDIO_PROCESSING_H_
 
+#include <stddef.h> // size_t
+
 #include "typedefs.h"
 #include "module.h"
 
@@ -111,7 +113,10 @@
   // for each far-end stream which requires processing. On the server-side,
   // this would typically be one instance for every incoming stream.
   static AudioProcessing* Create(int id);
+  virtual ~AudioProcessing() {};
 
+  // TODO(andrew): remove this method. We now allow users to delete instances
+  // directly, useful for scoped_ptr.
   // Destroys a |apm| instance.
   static void Destroy(AudioProcessing* apm);
 
@@ -186,7 +191,7 @@
   // a NULL-terminated string. If there is an ongoing recording, the old file
   // will be closed, and recording will continue in the newly specified file.
   // An already existing file will be overwritten without warning.
-  static const int kMaxFilenameSize = 1024;
+  static const size_t kMaxFilenameSize = 1024;
   virtual int StartDebugRecording(const char filename[kMaxFilenameSize]) = 0;
 
   // Stops recording debugging information, and closes the file. Recording
@@ -238,9 +243,6 @@
   // Inherited from Module.
   virtual WebRtc_Word32 TimeUntilNextProcess() { return -1; };
   virtual WebRtc_Word32 Process() { return -1; };
-
- protected:
-  virtual ~AudioProcessing() {};
 };
 
 // The acoustic echo cancellation (AEC) component provides better performance
@@ -320,6 +322,16 @@
   // TODO(ajm): discuss the metrics update period.
   virtual int GetMetrics(Metrics* metrics) = 0;
 
+  // Enables computation and logging of delay values. Statistics are obtained
+  // through |GetDelayMetrics()|.
+  virtual int enable_delay_logging(bool enable) = 0;
+  virtual bool is_delay_logging_enabled() const = 0;
+
+  // The delay metrics consist of the delay |median| and the delay standard
+  // deviation |std|. The values are averaged over the time period since the
+  // last call to |GetDelayMetrics()|.
+  virtual int GetDelayMetrics(int* median, int* std) = 0;
+
  protected:
   virtual ~EchoCancellation() {};
 };
@@ -356,6 +368,26 @@
   virtual int enable_comfort_noise(bool enable) = 0;
   virtual bool is_comfort_noise_enabled() const = 0;
 
+  // A typical use case is to initialize the component with an echo path from a
+  // previous call. The echo path is retrieved using |GetEchoPath()|, typically
+  // at the end of a call. The data can then be stored for later use as an
+  // initializer before the next call, using |SetEchoPath()|.
+  //
+  // Controlling the echo path this way requires the data |size_bytes| to match
+  // the internal echo path size. This size can be acquired using
+  // |echo_path_size_bytes()|. |SetEchoPath()| causes an entire reset, worth
+  // noting if it is to be called during an ongoing call.
+  //
+  // It is possible that version incompatibilities may result in a stored echo
+  // path of the incorrect size. In this case, the stored path should be
+  // discarded.
+  virtual int SetEchoPath(const void* echo_path, size_t size_bytes) = 0;
+  virtual int GetEchoPath(void* echo_path, size_t size_bytes) const = 0;
+
+  // The returned path size is guaranteed not to change for the lifetime of
+  // the application.
+  static size_t echo_path_size_bytes();
+
  protected:
   virtual ~EchoControlMobile() {};
 };
@@ -469,21 +501,18 @@
   virtual int Enable(bool enable) = 0;
   virtual bool is_enabled() const = 0;
 
-  // The metrics are reported in dBFs calculated as:
-  //   Level = 10log_10(P_s / P_max) [dBFs], where
-  //   P_s is the signal power and P_max is the maximum possible (or peak)
-  //   power. With 16-bit signals, P_max = (2^15)^2.
-  struct Metrics {
-    AudioProcessing::Statistic signal;  // Overall signal level.
-    AudioProcessing::Statistic speech;  // Speech level.
-    AudioProcessing::Statistic noise;   // Noise level.
-  };
-
-  virtual int GetMetrics(Metrics* metrics, Metrics* reverse_metrics) = 0;
-
-  //virtual int enable_noise_warning(bool enable) = 0;
-  //bool is_noise_warning_enabled() const = 0;
-  //virtual bool stream_has_high_noise() const = 0;
+  // Returns the root mean square (RMS) level in dBFs (decibels from digital
+  // full-scale), or alternately dBov. It is computed over all primary stream
+  // frames since the last call to RMS(). The returned value is positive but
+  // should be interpreted as negative. It is constrained to [0, 127].
+  //
+  // The computation follows:
+  // http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-05
+  // with the intent that it can provide the RTP audio level indication.
+  //
+  // Frames passed to ProcessStream() with an |_energy| of zero are considered
+  // to have been muted. The RMS of the frame will be interpreted as -127.
+  virtual int RMS() = 0;
 
  protected:
   virtual ~LevelEstimator() {};
@@ -517,6 +546,10 @@
 // The voice activity detection (VAD) component analyzes the stream to
 // determine if voice is present. A facility is also provided to pass in an
 // external VAD decision.
+//
+// In addition to |stream_has_voice()| the VAD decision is provided through the
+// |AudioFrame| passed to |ProcessStream()|. The |_vadActivity| member will be
+// modified to reflect the current decision.
 class VoiceDetection {
  public:
   virtual int Enable(bool enable) = 0;
diff --git a/src/modules/audio_processing/level_estimator_impl.cc b/src/modules/audio_processing/level_estimator_impl.cc
new file mode 100644
index 0000000..f127d4a
--- /dev/null
+++ b/src/modules/audio_processing/level_estimator_impl.cc
@@ -0,0 +1,172 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "level_estimator_impl.h"
+
+#include <assert.h>
+#include <math.h>
+#include <string.h>
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+#include "critical_section_wrapper.h"
+
+namespace webrtc {
+namespace {
+
+const double kMaxSquaredLevel = 32768.0 * 32768.0;
+
+class Level {
+ public:
+  static const int kMinLevel = 127;
+
+  Level()
+    : sum_square_(0.0),
+      sample_count_(0) {}
+  ~Level() {}
+
+  void Init() {
+    sum_square_ = 0.0;
+    sample_count_ = 0;
+  }
+
+  void Process(int16_t* data, int length) {
+    assert(data != NULL);
+    assert(length > 0);
+    sum_square_ += SumSquare(data, length);
+    sample_count_ += length;
+  }
+
+  void ProcessMuted(int length) {
+    assert(length > 0);
+    sample_count_ += length;
+  }
+
+  int RMS() {
+    if (sample_count_ == 0 || sum_square_ == 0.0) {
+      Init();
+      return kMinLevel;
+    }
+
+    // Normalize by the max level.
+    double rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
+    // 20log_10(x^0.5) = 10log_10(x)
+    rms = 10 * log10(rms);
+    if (rms > 0)
+      rms = 0;
+    else if (rms < -kMinLevel)
+      rms = -kMinLevel;
+
+    rms = -rms;
+    Init();
+    return static_cast<int>(rms + 0.5);
+  }
+
+ private:
+  static double SumSquare(int16_t* data, int length) {
+    double sum_square = 0.0;
+    for (int i = 0; i < length; ++i) {
+      double data_d = static_cast<double>(data[i]);
+      sum_square += data_d * data_d;
+    }
+    return sum_square;
+  }
+
+  double sum_square_;
+  int sample_count_;
+};
+}  // namespace
+
+LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
+  : ProcessingComponent(apm),
+    apm_(apm) {}
+
+LevelEstimatorImpl::~LevelEstimatorImpl() {}
+
+int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
+  if (!is_component_enabled()) {
+    return apm_->kNoError;
+  }
+
+  Level* level = static_cast<Level*>(handle(0));
+  if (audio->is_muted()) {
+    level->ProcessMuted(audio->samples_per_channel());
+    return apm_->kNoError;
+  }
+
+  int16_t* mixed_data = audio->data(0);
+  if (audio->num_channels() > 1) {
+    audio->CopyAndMix(1);
+    mixed_data = audio->mixed_data(0);
+  }
+
+  level->Process(mixed_data, audio->samples_per_channel());
+
+  return apm_->kNoError;
+}
+
+int LevelEstimatorImpl::Enable(bool enable) {
+  CriticalSectionScoped crit_scoped(*apm_->crit());
+  return EnableComponent(enable);
+}
+
+bool LevelEstimatorImpl::is_enabled() const {
+  return is_component_enabled();
+}
+
+int LevelEstimatorImpl::RMS() {
+  if (!is_component_enabled()) {
+    return apm_->kNotEnabledError;
+  }
+
+  Level* level = static_cast<Level*>(handle(0));
+  return level->RMS();
+}
+
+int LevelEstimatorImpl::get_version(char* version,
+                                    int version_len_bytes) const {
+  // An empty string is used to indicate no version information.
+  memset(version, 0, version_len_bytes);
+  return apm_->kNoError;
+}
+
+void* LevelEstimatorImpl::CreateHandle() const {
+  return new Level;
+}
+
+int LevelEstimatorImpl::DestroyHandle(void* handle) const {
+  assert(handle != NULL);
+  Level* level = static_cast<Level*>(handle);
+  delete level;
+  return apm_->kNoError;
+}
+
+int LevelEstimatorImpl::InitializeHandle(void* handle) const {
+  assert(handle != NULL);
+  Level* level = static_cast<Level*>(handle);
+  level->Init();
+
+  return apm_->kNoError;
+}
+
+int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
+  return apm_->kNoError;
+}
+
+int LevelEstimatorImpl::num_handles_required() const {
+  return 1;
+}
+
+int LevelEstimatorImpl::GetHandleError(void* handle) const {
+  // The component has no detailed errors.
+  assert(handle != NULL);
+  return apm_->kUnspecifiedError;
+}
+}  // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.h b/src/modules/audio_processing/level_estimator_impl.h
similarity index 90%
rename from src/modules/audio_processing/main/source/level_estimator_impl.h
rename to src/modules/audio_processing/level_estimator_impl.h
index 1515722..c9b7e02 100644
--- a/src/modules/audio_processing/main/source/level_estimator_impl.h
+++ b/src/modules/audio_processing/level_estimator_impl.h
@@ -24,8 +24,7 @@
   explicit LevelEstimatorImpl(const AudioProcessingImpl* apm);
   virtual ~LevelEstimatorImpl();
 
-  int AnalyzeReverseStream(AudioBuffer* audio);
-  int ProcessCaptureAudio(AudioBuffer* audio);
+  int ProcessStream(AudioBuffer* audio);
 
   // LevelEstimator implementation.
   virtual bool is_enabled() const;
@@ -36,7 +35,7 @@
  private:
   // LevelEstimator implementation.
   virtual int Enable(bool enable);
-  virtual int GetMetrics(Metrics* metrics, Metrics* reverse_metrics);
+  virtual int RMS();
 
   // ProcessingComponent implementation.
   virtual void* CreateHandle() const;
diff --git a/src/modules/audio_processing/main/apm_tests.gyp b/src/modules/audio_processing/main/apm_tests.gyp
deleted file mode 100644
index 441abeb..0000000
--- a/src/modules/audio_processing/main/apm_tests.gyp
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
-  'includes': [
-    '../../../common_settings.gypi',
-  ],
-  'targets': [
-    {
-      'target_name': 'unit_test',
-      'type': 'executable',
-      'dependencies': [
-        'source/apm.gyp:audio_processing',
-        '../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
-        '../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
-
-        '../../../../testing/gtest.gyp:gtest',
-        '../../../../testing/gtest.gyp:gtest_main',
-        '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
-      ],
-      'include_dirs': [
-        '../../../../testing/gtest/include',
-      ],
-      'sources': [
-        'test/unit_test/unit_test.cc',
-        'test/unit_test/audio_processing_unittest.pb.cc',
-        'test/unit_test/audio_processing_unittest.pb.h',
-      ],
-    },
-    {
-      'target_name': 'process_test',
-      'type': 'executable',
-      'dependencies': [
-        'source/apm.gyp:audio_processing',
-        '../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
-
-        '../../../../testing/gtest.gyp:gtest',
-        '../../../../testing/gtest.gyp:gtest_main',
-      ],
-      'include_dirs': [
-        '../../../../testing/gtest/include',
-      ],
-      'sources': [
-        'test/process_test/process_test.cc',
-      ],
-    },
-
-  ],
-}
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/main/source/Android.mk b/src/modules/audio_processing/main/source/Android.mk
deleted file mode 100644
index 634ad6a..0000000
--- a/src/modules/audio_processing/main/source/Android.mk
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_ARM_MODE := arm
-LOCAL_MODULE := libwebrtc_apm
-LOCAL_MODULE_TAGS := optional
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := audio_buffer.cc \
-    audio_processing_impl.cc \
-    echo_cancellation_impl.cc \
-    echo_control_mobile_impl.cc \
-    gain_control_impl.cc \
-    high_pass_filter_impl.cc \
-    level_estimator_impl.cc \
-    noise_suppression_impl.cc \
-    splitting_filter.cc \
-    processing_component.cc \
-    voice_detection_impl.cc
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR' \
-    '-DWEBRTC_NS_FIXED'
-#   floating point
-#   -DWEBRTC_NS_FLOAT'
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../interface \
-    $(LOCAL_PATH)/../../../../system_wrappers/interface \
-    $(LOCAL_PATH)/../../aec/main/interface \
-    $(LOCAL_PATH)/../../aecm/main/interface \
-    $(LOCAL_PATH)/../../agc/main/interface \
-    $(LOCAL_PATH)/../../ns/main/interface \
-    $(LOCAL_PATH)/../../../../common_audio/signal_processing_library/main/interface \
-    $(LOCAL_PATH)/../../../../common_audio/vad/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES := 
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport 
-
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
-
diff --git a/src/modules/audio_processing/main/source/apm.gyp b/src/modules/audio_processing/main/source/apm.gyp
deleted file mode 100644
index 93811c7..0000000
--- a/src/modules/audio_processing/main/source/apm.gyp
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
-  'includes': [
-    '../../../../common_settings.gypi', # Common settings
-  ],
-  'targets': [
-    {
-      'target_name': 'audio_processing',
-      'type': '<(library)',
-      'conditions': [
-        ['prefer_fixed_point==1', {
-          'dependencies': ['../../ns/main/source/ns.gyp:ns_fix'],
-          'defines': ['WEBRTC_NS_FIXED'],
-        }, { # else: prefer_fixed_point==0
-          'dependencies': ['../../ns/main/source/ns.gyp:ns'],
-          'defines': ['WEBRTC_NS_FLOAT'],
-        }],
-      ],
-      'dependencies': [
-        '../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
-        '../../aec/main/source/aec.gyp:aec',
-        '../../aecm/main/source/aecm.gyp:aecm',
-        '../../agc/main/source/agc.gyp:agc',
-        '../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
-        '../../../../common_audio/vad/main/source/vad.gyp:vad',
-      ],
-      'include_dirs': [
-        '../interface',
-        '../../../interface',
-      ],
-      'direct_dependent_settings': {
-        'include_dirs': [
-          '../interface',
-          '../../../interface',
-        ],
-      },
-      'sources': [
-        '../interface/audio_processing.h',
-        'audio_buffer.cc',
-        'audio_buffer.h',
-        'audio_processing_impl.cc',
-        'audio_processing_impl.h',
-        'echo_cancellation_impl.cc',
-        'echo_cancellation_impl.h',
-        'echo_control_mobile_impl.cc',
-        'echo_control_mobile_impl.h',
-        'gain_control_impl.cc',
-        'gain_control_impl.h',
-        'high_pass_filter_impl.cc',
-        'high_pass_filter_impl.h',
-        'level_estimator_impl.cc',
-        'level_estimator_impl.h',
-        'noise_suppression_impl.cc',
-        'noise_suppression_impl.h',
-        'splitting_filter.cc',
-        'splitting_filter.h',
-        'processing_component.cc',
-        'processing_component.h',
-        'voice_detection_impl.cc',
-        'voice_detection_impl.h',
-      ],
-    },
-  ],
-}
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/main/source/audio_buffer.cc b/src/modules/audio_processing/main/source/audio_buffer.cc
deleted file mode 100644
index 6b20fce..0000000
--- a/src/modules/audio_processing/main/source/audio_buffer.cc
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio_buffer.h"
-
-#include "module_common_types.h"
-
-namespace webrtc {
-namespace {
-
-enum {
-  kSamplesPer8kHzChannel = 80,
-  kSamplesPer16kHzChannel = 160,
-  kSamplesPer32kHzChannel = 320
-};
-
-void StereoToMono(const WebRtc_Word16* left, const WebRtc_Word16* right,
-                  WebRtc_Word16* out, int samples_per_channel) {
-  WebRtc_Word32 data_int32 = 0;
-  for (int i = 0; i < samples_per_channel; i++) {
-    data_int32 = (left[i] + right[i]) >> 1;
-    if (data_int32 > 32767) {
-      data_int32 = 32767;
-    } else if (data_int32 < -32768) {
-      data_int32 = -32768;
-    }
-
-    out[i] = static_cast<WebRtc_Word16>(data_int32);
-  }
-}
-}  // namespace
-
-struct AudioChannel {
-  AudioChannel() {
-    memset(data, 0, sizeof(data));
-  }
-
-  WebRtc_Word16 data[kSamplesPer32kHzChannel];
-};
-
-struct SplitAudioChannel {
-  SplitAudioChannel() {
-    memset(low_pass_data, 0, sizeof(low_pass_data));
-    memset(high_pass_data, 0, sizeof(high_pass_data));
-    memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
-    memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
-    memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
-    memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
-  }
-
-  WebRtc_Word16 low_pass_data[kSamplesPer16kHzChannel];
-  WebRtc_Word16 high_pass_data[kSamplesPer16kHzChannel];
-
-  WebRtc_Word32 analysis_filter_state1[6];
-  WebRtc_Word32 analysis_filter_state2[6];
-  WebRtc_Word32 synthesis_filter_state1[6];
-  WebRtc_Word32 synthesis_filter_state2[6];
-};
-
-// TODO(am): check range of input parameters?
-AudioBuffer::AudioBuffer(WebRtc_Word32 max_num_channels,
-                         WebRtc_Word32 samples_per_channel)
-    : max_num_channels_(max_num_channels),
-      num_channels_(0),
-      num_mixed_channels_(0),
-      num_mixed_low_pass_channels_(0),
-      samples_per_channel_(samples_per_channel),
-      samples_per_split_channel_(samples_per_channel),
-      reference_copied_(false),
-      data_(NULL),
-      channels_(NULL),
-      split_channels_(NULL),
-      mixed_low_pass_channels_(NULL),
-      low_pass_reference_channels_(NULL) {
-  if (max_num_channels_ > 1) {
-    channels_ = new AudioChannel[max_num_channels_];
-    mixed_low_pass_channels_ = new AudioChannel[max_num_channels_];
-  }
-  low_pass_reference_channels_ = new AudioChannel[max_num_channels_];
-
-  if (samples_per_channel_ == kSamplesPer32kHzChannel) {
-    split_channels_ = new SplitAudioChannel[max_num_channels_];
-    samples_per_split_channel_ = kSamplesPer16kHzChannel;
-  }
-}
-
-AudioBuffer::~AudioBuffer() {
-  if (channels_ != NULL) {
-    delete [] channels_;
-  }
-
-  if (mixed_low_pass_channels_ != NULL) {
-    delete [] mixed_low_pass_channels_;
-  }
-
-  if (low_pass_reference_channels_ != NULL) {
-    delete [] low_pass_reference_channels_;
-  }
-
-  if (split_channels_ != NULL) {
-    delete [] split_channels_;
-  }
-}
-
-WebRtc_Word16* AudioBuffer::data(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  if (data_ != NULL) {
-    return data_;
-  }
-
-  return channels_[channel].data;
-}
-
-WebRtc_Word16* AudioBuffer::low_pass_split_data(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  if (split_channels_ == NULL) {
-    return data(channel);
-  }
-
-  return split_channels_[channel].low_pass_data;
-}
-
-WebRtc_Word16* AudioBuffer::high_pass_split_data(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  if (split_channels_ == NULL) {
-    return NULL;
-  }
-
-  return split_channels_[channel].high_pass_data;
-}
-
-WebRtc_Word16* AudioBuffer::mixed_low_pass_data(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
-
-  return mixed_low_pass_channels_[channel].data;
-}
-
-WebRtc_Word16* AudioBuffer::low_pass_reference(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  if (!reference_copied_) {
-    return NULL;
-  }
-
-  return low_pass_reference_channels_[channel].data;
-}
-
-WebRtc_Word32* AudioBuffer::analysis_filter_state1(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  return split_channels_[channel].analysis_filter_state1;
-}
-
-WebRtc_Word32* AudioBuffer::analysis_filter_state2(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  return split_channels_[channel].analysis_filter_state2;
-}
-
-WebRtc_Word32* AudioBuffer::synthesis_filter_state1(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  return split_channels_[channel].synthesis_filter_state1;
-}
-
-WebRtc_Word32* AudioBuffer::synthesis_filter_state2(WebRtc_Word32 channel) const {
-  assert(channel >= 0 && channel < num_channels_);
-  return split_channels_[channel].synthesis_filter_state2;
-}
-
-WebRtc_Word32 AudioBuffer::num_channels() const {
-  return num_channels_;
-}
-
-WebRtc_Word32 AudioBuffer::samples_per_channel() const {
-  return samples_per_channel_;
-}
-
-WebRtc_Word32 AudioBuffer::samples_per_split_channel() const {
-  return samples_per_split_channel_;
-}
-
-// TODO(ajm): Do deinterleaving and mixing in one step?
-void AudioBuffer::DeinterleaveFrom(AudioFrame* audioFrame) {
-  assert(audioFrame->_audioChannel <= max_num_channels_);
-  assert(audioFrame->_payloadDataLengthInSamples ==  samples_per_channel_);
-
-  num_channels_ = audioFrame->_audioChannel;
-  num_mixed_channels_ = 0;
-  num_mixed_low_pass_channels_ = 0;
-  reference_copied_ = false;
-
-  if (num_channels_ == 1) {
-    // We can get away with a pointer assignment in this case.
-    data_ = audioFrame->_payloadData;
-    return;
-  }
-
-  for (int i = 0; i < num_channels_; i++) {
-    WebRtc_Word16* deinterleaved = channels_[i].data;
-    WebRtc_Word16* interleaved = audioFrame->_payloadData;
-    WebRtc_Word32 interleaved_idx = i;
-    for (int j = 0; j < samples_per_channel_; j++) {
-      deinterleaved[j] = interleaved[interleaved_idx];
-      interleaved_idx += num_channels_;
-    }
-  }
-}
-
-void AudioBuffer::InterleaveTo(AudioFrame* audioFrame) const {
-  assert(audioFrame->_audioChannel == num_channels_);
-  assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);
-
-  if (num_channels_ == 1) {
-    if (num_mixed_channels_ == 1) {
-      memcpy(audioFrame->_payloadData,
-             channels_[0].data,
-             sizeof(WebRtc_Word16) * samples_per_channel_);
-    } else {
-      // These should point to the same buffer in this case.
-      assert(data_ == audioFrame->_payloadData);
-    }
-
-    return;
-  }
-
-  for (int i = 0; i < num_channels_; i++) {
-    WebRtc_Word16* deinterleaved = channels_[i].data;
-    WebRtc_Word16* interleaved = audioFrame->_payloadData;
-    WebRtc_Word32 interleaved_idx = i;
-    for (int j = 0; j < samples_per_channel_; j++) {
-      interleaved[interleaved_idx] = deinterleaved[j];
-      interleaved_idx += num_channels_;
-    }
-  }
-}
-
-// TODO(ajm): would be good to support the no-mix case with pointer assignment.
-// TODO(ajm): handle mixing to multiple channels?
-void AudioBuffer::Mix(WebRtc_Word32 num_mixed_channels) {
-  // We currently only support the stereo to mono case.
-  assert(num_channels_ == 2);
-  assert(num_mixed_channels == 1);
-
-  StereoToMono(channels_[0].data,
-               channels_[1].data,
-               channels_[0].data,
-               samples_per_channel_);
-
-  num_channels_ = num_mixed_channels;
-  num_mixed_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels) {
-  // We currently only support the stereo to mono case.
-  assert(num_channels_ == 2);
-  assert(num_mixed_channels == 1);
-
-  StereoToMono(low_pass_split_data(0),
-               low_pass_split_data(1),
-               mixed_low_pass_channels_[0].data,
-               samples_per_split_channel_);
-
-  num_mixed_low_pass_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyLowPassToReference() {
-  reference_copied_ = true;
-  for (int i = 0; i < num_channels_; i++) {
-    memcpy(low_pass_reference_channels_[i].data,
-           low_pass_split_data(i),
-           sizeof(WebRtc_Word16) * samples_per_split_channel_);
-  }
-}
-}  // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/audio_buffer.h b/src/modules/audio_processing/main/source/audio_buffer.h
deleted file mode 100644
index 15f850b..0000000
--- a/src/modules/audio_processing/main/source/audio_buffer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
-
-#include "typedefs.h"
-
-
-namespace webrtc {
-
-struct AudioChannel;
-struct SplitAudioChannel;
-class AudioFrame;
-
-class AudioBuffer {
- public:
-  AudioBuffer(WebRtc_Word32 max_num_channels, WebRtc_Word32 samples_per_channel);
-  virtual ~AudioBuffer();
-
-  WebRtc_Word32 num_channels() const;
-  WebRtc_Word32 samples_per_channel() const;
-  WebRtc_Word32 samples_per_split_channel() const;
-
-  WebRtc_Word16* data(WebRtc_Word32 channel) const;
-  WebRtc_Word16* low_pass_split_data(WebRtc_Word32 channel) const;
-  WebRtc_Word16* high_pass_split_data(WebRtc_Word32 channel) const;
-  WebRtc_Word16* mixed_low_pass_data(WebRtc_Word32 channel) const;
-  WebRtc_Word16* low_pass_reference(WebRtc_Word32 channel) const;
-
-  WebRtc_Word32* analysis_filter_state1(WebRtc_Word32 channel) const;
-  WebRtc_Word32* analysis_filter_state2(WebRtc_Word32 channel) const;
-  WebRtc_Word32* synthesis_filter_state1(WebRtc_Word32 channel) const;
-  WebRtc_Word32* synthesis_filter_state2(WebRtc_Word32 channel) const;
-
-  void DeinterleaveFrom(AudioFrame* audioFrame);
-  void InterleaveTo(AudioFrame* audioFrame) const;
-  void Mix(WebRtc_Word32 num_mixed_channels);
-  void CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels);
-  void CopyLowPassToReference();
-
- private:
-  const WebRtc_Word32 max_num_channels_;
-  WebRtc_Word32 num_channels_;
-  WebRtc_Word32 num_mixed_channels_;
-  WebRtc_Word32 num_mixed_low_pass_channels_;
-  const WebRtc_Word32 samples_per_channel_;
-  WebRtc_Word32 samples_per_split_channel_;
-  bool reference_copied_;
-
-  WebRtc_Word16* data_;
-  // TODO(ajm): Prefer to make these vectors if permitted...
-  AudioChannel* channels_;
-  SplitAudioChannel* split_channels_;
-  // TODO(ajm): improve this, we don't need the full 32 kHz space here.
-  AudioChannel* mixed_low_pass_channels_;
-  AudioChannel* low_pass_reference_channels_;
-};
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.cc b/src/modules/audio_processing/main/source/level_estimator_impl.cc
deleted file mode 100644
index 799a962..0000000
--- a/src/modules/audio_processing/main/source/level_estimator_impl.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "level_estimator_impl.h"
-
-#include <cassert>
-#include <cstring>
-
-#include "critical_section_wrapper.h"
-
-#include "audio_processing_impl.h"
-#include "audio_buffer.h"
-
-// TODO(ajm): implement the underlying level estimator component.
-
-namespace webrtc {
-
-typedef void Handle;
-
-namespace {
-/*int EstimateLevel(AudioBuffer* audio, Handle* my_handle) {
-  assert(audio->samples_per_split_channel() <= 160);
-
-  WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
-  if (audio->num_channels() > 1) {
-    audio->CopyAndMixLowPass(1);
-    mixed_data = audio->mixed_low_pass_data(0);
-  }
-
-  int err = UpdateLvlEst(my_handle,
-                         mixed_data,
-                         audio->samples_per_split_channel());
-  if (err != AudioProcessing::kNoError) {
-    return GetHandleError(my_handle);
-  }
-
-  return AudioProcessing::kNoError;
-}
-
-int GetMetricsLocal(Handle* my_handle, LevelEstimator::Metrics* metrics) {
-  level_t levels;
-  memset(&levels, 0, sizeof(levels));
-
-  int err = ExportLevels(my_handle, &levels, 2);
-  if (err != AudioProcessing::kNoError) {
-    return err;
-  }
-  metrics->signal.instant = levels.instant;
-  metrics->signal.average = levels.average;
-  metrics->signal.maximum = levels.max;
-  metrics->signal.minimum = levels.min;
-
-  err = ExportLevels(my_handle, &levels, 1);
-  if (err != AudioProcessing::kNoError) {
-    return err;
-  }
-  metrics->speech.instant = levels.instant;
-  metrics->speech.average = levels.average;
-  metrics->speech.maximum = levels.max;
-  metrics->speech.minimum = levels.min;
-
-  err = ExportLevels(my_handle, &levels, 0);
-  if (err != AudioProcessing::kNoError) {
-    return err;
-  }
-  metrics->noise.instant = levels.instant;
-  metrics->noise.average = levels.average;
-  metrics->noise.maximum = levels.max;
-  metrics->noise.minimum = levels.min;
-
-  return AudioProcessing::kNoError;
-}*/
-}  // namespace
-
-LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
-  : ProcessingComponent(apm),
-    apm_(apm) {}
-
-LevelEstimatorImpl::~LevelEstimatorImpl() {}
-
-int LevelEstimatorImpl::AnalyzeReverseStream(AudioBuffer* /*audio*/) {
-  return apm_->kUnsupportedComponentError;
-  /*if (!is_component_enabled()) {
-    return apm_->kNoError;
-  }
-
-  return EstimateLevel(audio, static_cast<Handle*>(handle(1)));*/
-}
-
-int LevelEstimatorImpl::ProcessCaptureAudio(AudioBuffer* /*audio*/) {
-  return apm_->kUnsupportedComponentError;
-  /*if (!is_component_enabled()) {
-    return apm_->kNoError;
-  }
-
-  return EstimateLevel(audio, static_cast<Handle*>(handle(0)));*/
-}
-
-int LevelEstimatorImpl::Enable(bool /*enable*/) {
-  CriticalSectionScoped crit_scoped(*apm_->crit());
-  return apm_->kUnsupportedComponentError;
-  //return EnableComponent(enable);
-}
-
-bool LevelEstimatorImpl::is_enabled() const {
-  return is_component_enabled();
-}
-
-int LevelEstimatorImpl::GetMetrics(LevelEstimator::Metrics* /*metrics*/,
-                                   LevelEstimator::Metrics* /*reverse_metrics*/) {
-  return apm_->kUnsupportedComponentError;
-  /*if (!is_component_enabled()) {
-    return apm_->kNotEnabledError;
-  }
-
-  int err = GetMetricsLocal(static_cast<Handle*>(handle(0)), metrics);
-  if (err != apm_->kNoError) {
-    return err;
-  }
-
-  err = GetMetricsLocal(static_cast<Handle*>(handle(1)), reverse_metrics);
-  if (err != apm_->kNoError) {
-    return err;
-  }
-
-  return apm_->kNoError;*/
-}
-
-int LevelEstimatorImpl::get_version(char* version,
-                                    int version_len_bytes) const {
-  // An empty string is used to indicate no version information.
-  memset(version, 0, version_len_bytes);
-  return apm_->kNoError;
-}
-
-void* LevelEstimatorImpl::CreateHandle() const {
-  Handle* handle = NULL;
-  /*if (CreateLvlEst(&handle) != apm_->kNoError) {
-    handle = NULL;
-  } else {
-    assert(handle != NULL);
-  }*/
-
-  return handle;
-}
-
-int LevelEstimatorImpl::DestroyHandle(void* /*handle*/) const {
-  return apm_->kUnsupportedComponentError;
-  //return FreeLvlEst(static_cast<Handle*>(handle));
-}
-
-int LevelEstimatorImpl::InitializeHandle(void* /*handle*/) const {
-  return apm_->kUnsupportedComponentError;
-  /*const double kIntervalSeconds = 1.5;
-  return InitLvlEst(static_cast<Handle*>(handle),
-                    apm_->sample_rate_hz(),
-                    kIntervalSeconds);*/
-}
-
-int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
-  return apm_->kUnsupportedComponentError;
-  //return apm_->kNoError;
-}
-
-int LevelEstimatorImpl::num_handles_required() const {
-  return apm_->kUnsupportedComponentError;
-  //return 2;
-}
-
-int LevelEstimatorImpl::GetHandleError(void* handle) const {
-  // The component has no detailed errors.
-  assert(handle != NULL);
-  return apm_->kUnspecifiedError;
-}
-}  // namespace webrtc
diff --git a/src/modules/audio_processing/main/test/process_test/Android.mk b/src/modules/audio_processing/main/test/process_test/Android.mk
deleted file mode 100644
index 23080aa..0000000
--- a/src/modules/audio_processing/main/test/process_test/Android.mk
+++ /dev/null
@@ -1,48 +0,0 @@
-#  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-#  Use of this source code is governed by a BSD-style license
-#  that can be found in the LICENSE file in the root of the source
-#  tree. An additional intellectual property rights grant can be found
-#  in the file PATENTS.  All contributing project authors may
-#  be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH:= $(call my-dir)
-
-# apm test app
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_TAGS := tests
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_SRC_FILES:= \
-    process_test.cc
-
-# Flags passed to both C and C++ files.
-LOCAL_CFLAGS := \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR' \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-
-LOCAL_CPPFLAGS := 
-LOCAL_LDFLAGS :=
-LOCAL_C_INCLUDES := \
-    external/gtest/include \
-    $(LOCAL_PATH)/../../../../../system_wrappers/interface \
-    $(LOCAL_PATH)/../../interface \
-    $(LOCAL_PATH)/../../../../interface \
-    $(LOCAL_PATH)/../../../../..
-
-LOCAL_STATIC_LIBRARIES := \
-    libgtest 
-
-LOCAL_SHARED_LIBRARIES := \
-    libutils \
-    libstlport \
-    libwebrtc_audio_preprocessing 
-
-LOCAL_MODULE:= webrtc_apm_process_test
-
-include external/stlport/libstlport.mk
-include $(BUILD_EXECUTABLE)
diff --git a/src/modules/audio_processing/main/test/process_test/process_test.cc b/src/modules/audio_processing/main/test/process_test/process_test.cc
deleted file mode 100644
index c62345f..0000000
--- a/src/modules/audio_processing/main/test/process_test/process_test.cc
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include <string.h>
-#ifdef WEBRTC_ANDROID
-#include <sys/stat.h>
-#endif
-
-#include "tick_util.h"
-#include "gtest/gtest.h"
-#include "module_common_types.h"
-
-#include "audio_processing.h"
-
-#include "cpu_features_wrapper.h"
-
-using webrtc::AudioFrame;
-using webrtc::TickInterval;
-using webrtc::TickTime;
-
-using webrtc::AudioProcessing;
-using webrtc::GainControl;
-using webrtc::NoiseSuppression;
-
-void usage() {
-  printf(
-  "Usage: process_test [options] [-ir REVERSE_FILE] [-i PRIMARY_FILE]\n");
-  printf(
-  "  [-o OUT_FILE]\n");
-  printf(
-  "process_test is a test application for AudioProcessing.\n\n"
-  "When -ir or -i is specified the files will be processed directly in a\n"
-  "simulation mode. Otherwise the full set of test files is expected to be\n"
-  "present in the working directory.\n");
-  printf("\n");
-  printf("Options\n");
-  printf("General configuration:\n");
-  printf("  -fs SAMPLE_RATE_HZ\n");
-  printf("  -ch CHANNELS_IN CHANNELS_OUT\n");
-  printf("  -rch REVERSE_CHANNELS\n");
-  printf("\n");
-  printf("Component configuration:\n");
-  printf(
-  "All components are disabled by default. Each block below begins with a\n"
-  "flag to enable the component with default settings. The subsequent flags\n"
-  "in the block are used to provide configuration settings.\n");
-  printf("\n  -aec     Echo cancellation\n");
-  printf("  --drift_compensation\n");
-  printf("  --no_drift_compensation\n");
-  printf("\n  -aecm    Echo control mobile\n");
-  printf("\n  -agc     Gain control\n");
-  printf("  --analog\n");
-  printf("  --adaptive_digital\n");
-  printf("  --fixed_digital\n");
-  printf("  --target_level LEVEL\n");
-  printf("  --compression_gain GAIN\n");
-  printf("  --limiter\n");
-  printf("  --no_limiter\n");
-  printf("\n  -hpf     High pass filter\n");
-  printf("\n  -ns      Noise suppression\n");
-  printf("  --ns_low\n");
-  printf("  --ns_moderate\n");
-  printf("  --ns_high\n");
-  printf("  --ns_very_high\n");
-  printf("\n  -vad     Voice activity detection\n");
-  printf("  --vad_out_file FILE");
-  printf("\n");
-  printf("Modifiers:\n");
-  printf("  --perf          Measure performance.\n");
-  printf("  --quiet         Suppress text output.\n");
-  printf("  --no_progress   Suppress progress.\n");
-  printf("  --version       Print version information and exit.\n");
-}
-
-// void function for gtest.
-void void_main(int argc, char* argv[]) {
-  if (argc > 1 && strcmp(argv[1], "--help") == 0) {
-    usage();
-    return;
-  }
-
-  if (argc < 2) {
-    printf("Did you mean to run without arguments?\n");
-    printf("Try `process_test --help' for more information.\n\n");
-  }
-
-  AudioProcessing* apm = AudioProcessing::Create(0);
-  ASSERT_TRUE(apm != NULL);
-
-  WebRtc_Word8 version[1024];
-  WebRtc_UWord32 version_bytes_remaining = sizeof(version);
-  WebRtc_UWord32 version_position = 0;
-
-  const char* far_filename = NULL;
-  const char* near_filename = NULL;
-  const char* out_filename = NULL;
-  const char* vad_out_filename = NULL;
-
-  int32_t sample_rate_hz = 16000;
-  int32_t device_sample_rate_hz = 16000;
-
-  int num_capture_input_channels = 1;
-  int num_capture_output_channels = 1;
-  int num_render_channels = 1;
-
-  int samples_per_channel = sample_rate_hz / 100;
-
-  bool simulating = false;
-  bool perf_testing = false;
-  bool verbose = true;
-  bool progress = true;
-  //bool interleaved = true;
-
-  for (int i = 1; i < argc; i++) {
-     if (strcmp(argv[i], "-ir") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify filename after -ir";
-      far_filename = argv[i];
-      simulating = true;
-
-    } else if (strcmp(argv[i], "-i") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify filename after -i";
-      near_filename = argv[i];
-      simulating = true;
-
-    } else if (strcmp(argv[i], "-o") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify filename after -o";
-      out_filename = argv[i];
-
-    } else if (strcmp(argv[i], "-fs") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify sample rate after -fs";
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
-      samples_per_channel = sample_rate_hz / 100;
-
-      ASSERT_EQ(apm->kNoError,
-                apm->set_sample_rate_hz(sample_rate_hz));
-
-    } else if (strcmp(argv[i], "-ch") == 0) {
-      i++;
-      ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
-      i++;
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
-
-      ASSERT_EQ(apm->kNoError,
-                apm->set_num_channels(num_capture_input_channels,
-                                      num_capture_output_channels));
-
-    } else if (strcmp(argv[i], "-rch") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify number of channels after -rch";
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
-
-      ASSERT_EQ(apm->kNoError,
-                apm->set_num_reverse_channels(num_render_channels));
-
-    } else if (strcmp(argv[i], "-aec") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
-
-    } else if (strcmp(argv[i], "-noasm") == 0) {
-      WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
-
-    } else if (strcmp(argv[i], "--drift_compensation") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
-      // TODO(ajm): this is enabled in the VQE test app by default. Investigate
-      //            why it can give better performance despite passing zeros.
-      ASSERT_EQ(apm->kNoError,
-                apm->echo_cancellation()->enable_drift_compensation(true));
-    } else if (strcmp(argv[i], "--no_drift_compensation") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->echo_cancellation()->enable_drift_compensation(false));
-
-    } else if (strcmp(argv[i], "-aecm") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
-
-    } else if (strcmp(argv[i], "-agc") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-
-    } else if (strcmp(argv[i], "--analog") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
-
-    } else if (strcmp(argv[i], "--adaptive_digital") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
-
-    } else if (strcmp(argv[i], "--fixed_digital") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_mode(GainControl::kFixedDigital));
-
-    } else if (strcmp(argv[i], "--target_level") == 0) {
-      i++;
-      int level;
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &level));
-
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_target_level_dbfs(level));
-
-    } else if (strcmp(argv[i], "--compression_gain") == 0) {
-      i++;
-      int gain;
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &gain));
-
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_compression_gain_db(gain));
-
-    } else if (strcmp(argv[i], "--limiter") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->enable_limiter(true));
-
-    } else if (strcmp(argv[i], "--no_limiter") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->enable_limiter(false));
-
-    } else if (strcmp(argv[i], "-hpf") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->high_pass_filter()->Enable(true));
-
-    } else if (strcmp(argv[i], "-ns") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
-
-    } else if (strcmp(argv[i], "--ns_low") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-          apm->noise_suppression()->set_level(NoiseSuppression::kLow));
-
-    } else if (strcmp(argv[i], "--ns_moderate") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-          apm->noise_suppression()->set_level(NoiseSuppression::kModerate));
-
-    } else if (strcmp(argv[i], "--ns_high") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-          apm->noise_suppression()->set_level(NoiseSuppression::kHigh));
-
-    } else if (strcmp(argv[i], "--ns_very_high") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
-      ASSERT_EQ(apm->kNoError,
-          apm->noise_suppression()->set_level(NoiseSuppression::kVeryHigh));
-
-    } else if (strcmp(argv[i], "-vad") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
-
-    } else if (strcmp(argv[i], "--vad_out_file") == 0) {
-      i++;
-      ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
-      vad_out_filename = argv[i];
-
-    } else if (strcmp(argv[i], "--perf") == 0) {
-      perf_testing = true;
-
-    } else if (strcmp(argv[i], "--quiet") == 0) {
-      verbose = false;
-      progress = false;
-
-    } else if (strcmp(argv[i], "--no_progress") == 0) {
-      progress = false;
-
-    } else if (strcmp(argv[i], "--version") == 0) {
-      ASSERT_EQ(apm->kNoError, apm->Version(version,
-                                            version_bytes_remaining,
-                                            version_position));
-      printf("%s\n", version);
-      return;
-
-    } else {
-      FAIL() << "Unrecognized argument " << argv[i];
-    }
-  }
-
-  if (verbose) {
-    printf("Sample rate: %d Hz\n", sample_rate_hz);
-    printf("Primary channels: %d (in), %d (out)\n",
-           num_capture_input_channels,
-           num_capture_output_channels);
-    printf("Reverse channels: %d \n", num_render_channels);
-  }
-
-  const char far_file_default[] = "apm_far.pcm";
-  const char near_file_default[] = "apm_near.pcm";
-  const char out_file_default[] = "out.pcm";
-  const char event_filename[] = "apm_event.dat";
-  const char delay_filename[] = "apm_delay.dat";
-  const char drift_filename[] = "apm_drift.dat";
-  const char vad_file_default[] = "vad_out.dat";
-
-  if (!simulating) {
-    far_filename = far_file_default;
-    near_filename = near_file_default;
-  }
-
-  if (out_filename == NULL) {
-    out_filename = out_file_default;
-  }
-
-  if (vad_out_filename == NULL) {
-    vad_out_filename = vad_file_default;
-  }
-
-  FILE* far_file = NULL;
-  FILE* near_file = NULL;
-  FILE* out_file = NULL;
-  FILE* event_file = NULL;
-  FILE* delay_file = NULL;
-  FILE* drift_file = NULL;
-  FILE* vad_out_file = NULL;
-
-  if (far_filename != NULL) {
-    far_file = fopen(far_filename, "rb");
-    ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
-                                  << far_filename;
-  }
-
-  near_file = fopen(near_filename, "rb");
-  ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
-                                 << near_filename;
-  struct stat st;
-  stat(near_filename, &st);
-  int near_size_samples = st.st_size / sizeof(int16_t);
-
-  out_file = fopen(out_filename, "wb");
-  ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
-                                << out_filename;
-
-  if (!simulating) {
-    event_file = fopen(event_filename, "rb");
-    ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
-                                    << event_filename;
-
-    delay_file = fopen(delay_filename, "rb");
-    ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
-                                    << delay_filename;
-
-    drift_file = fopen(drift_filename, "rb");
-    ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
-                                    << drift_filename;
-  }
-
-  if (apm->voice_detection()->is_enabled()) {
-    vad_out_file = fopen(vad_out_filename, "wb");
-    ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
-                                      << vad_out_file;
-  }
-
-  enum Events {
-    kInitializeEvent,
-    kRenderEvent,
-    kCaptureEvent,
-    kResetEventDeprecated
-  };
-  int16_t event = 0;
-  size_t read_count = 0;
-  int reverse_count = 0;
-  int primary_count = 0;
-  int near_read_samples = 0;
-  TickInterval acc_ticks;
-
-  AudioFrame far_frame;
-  far_frame._frequencyInHz = sample_rate_hz;
-
-  AudioFrame near_frame;
-  near_frame._frequencyInHz = sample_rate_hz;
-
-  int delay_ms = 0;
-  int drift_samples = 0;
-  int capture_level = 127;
-  int8_t stream_has_voice = 0;
-
-  TickTime t0 = TickTime::Now();
-  TickTime t1 = t0;
-  WebRtc_Word64 max_time_us = 0;
-  WebRtc_Word64 max_time_reverse_us = 0;
-  WebRtc_Word64 min_time_us = 1e6;
-  WebRtc_Word64 min_time_reverse_us = 1e6;
-
-  while (simulating || feof(event_file) == 0) {
-    std::ostringstream trace_stream;
-    trace_stream << "Processed frames: " << reverse_count << " (reverse), "
-                 << primary_count << " (primary)";
-    SCOPED_TRACE(trace_stream.str());
-
-
-    if (simulating) {
-      if (far_file == NULL) {
-        event = kCaptureEvent;
-      } else {
-        if (event == kRenderEvent) {
-          event = kCaptureEvent;
-        } else {
-          event = kRenderEvent;
-        }
-      }
-    } else {
-      read_count = fread(&event, sizeof(event), 1, event_file);
-      if (read_count != 1) {
-        break;
-      }
-      //if (fread(&event, sizeof(event), 1, event_file) != 1) {
-      //  break; // This is expected.
-      //}
-    }
-
-    if (event == kInitializeEvent || event == kResetEventDeprecated) {
-      ASSERT_EQ(1u,
-          fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
-      samples_per_channel = sample_rate_hz / 100;
-
-      ASSERT_EQ(1u,
-          fread(&device_sample_rate_hz,
-                sizeof(device_sample_rate_hz),
-                1,
-                event_file));
-
-      ASSERT_EQ(apm->kNoError,
-          apm->set_sample_rate_hz(sample_rate_hz));
-
-      ASSERT_EQ(apm->kNoError,
-                apm->echo_cancellation()->set_device_sample_rate_hz(
-                    device_sample_rate_hz));
-
-      far_frame._frequencyInHz = sample_rate_hz;
-      near_frame._frequencyInHz = sample_rate_hz;
-
-      if (verbose) {
-        printf("Init at frame: %d (primary), %d (reverse)\n",
-            primary_count, reverse_count);
-        printf("  Sample rate: %d Hz\n", sample_rate_hz);
-      }
-
-    } else if (event == kRenderEvent) {
-      reverse_count++;
-      far_frame._audioChannel = num_render_channels;
-      far_frame._payloadDataLengthInSamples =
-          num_render_channels * samples_per_channel;
-
-      read_count = fread(far_frame._payloadData,
-                         sizeof(WebRtc_Word16),
-                         far_frame._payloadDataLengthInSamples,
-                         far_file);
-
-      if (simulating) {
-        if (read_count != far_frame._payloadDataLengthInSamples) {
-          break; // This is expected.
-        }
-      } else {
-        ASSERT_EQ(read_count,
-            far_frame._payloadDataLengthInSamples);
-      }
-
-      if (perf_testing) {
-        t0 = TickTime::Now();
-      }
-
-      ASSERT_EQ(apm->kNoError,
-                apm->AnalyzeReverseStream(&far_frame));
-
-      if (perf_testing) {
-        t1 = TickTime::Now();
-        TickInterval tick_diff = t1 - t0;
-        acc_ticks += tick_diff;
-        if (tick_diff.Microseconds() > max_time_reverse_us) {
-          max_time_reverse_us = tick_diff.Microseconds();
-        }
-        if (tick_diff.Microseconds() < min_time_reverse_us) {
-          min_time_reverse_us = tick_diff.Microseconds();
-        }
-      }
-
-    } else if (event == kCaptureEvent) {
-      primary_count++;
-      near_frame._audioChannel = num_capture_input_channels;
-      near_frame._payloadDataLengthInSamples =
-          num_capture_input_channels * samples_per_channel;
-
-      read_count = fread(near_frame._payloadData,
-                         sizeof(WebRtc_Word16),
-                         near_frame._payloadDataLengthInSamples,
-                         near_file);
-
-      near_read_samples += read_count;
-      if (progress && primary_count % 100 == 0) {
-        printf("%.0f%% complete\r",
-            (near_read_samples * 100.0) / near_size_samples);
-        fflush(stdout);
-      }
-      if (simulating) {
-        if (read_count != near_frame._payloadDataLengthInSamples) {
-          break; // This is expected.
-        }
-
-        delay_ms = 0;
-        drift_samples = 0;
-      } else {
-        ASSERT_EQ(read_count,
-            near_frame._payloadDataLengthInSamples);
-
-        // TODO(ajm): sizeof(delay_ms) for current files?
-        ASSERT_EQ(1u,
-            fread(&delay_ms, 2, 1, delay_file));
-        ASSERT_EQ(1u,
-            fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
-      }
-
-      if (perf_testing) {
-        t0 = TickTime::Now();
-      }
-
-      // TODO(ajm): fake an analog gain while simulating.
-
-      int capture_level_in = capture_level;
-      ASSERT_EQ(apm->kNoError,
-                apm->gain_control()->set_stream_analog_level(capture_level));
-      ASSERT_EQ(apm->kNoError,
-                apm->set_stream_delay_ms(delay_ms));
-      ASSERT_EQ(apm->kNoError,
-          apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
-
-      int err = apm->ProcessStream(&near_frame);
-      if (err == apm->kBadStreamParameterWarning) {
-        printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
-      }
-      ASSERT_TRUE(err == apm->kNoError ||
-                  err == apm->kBadStreamParameterWarning);
-
-      capture_level = apm->gain_control()->stream_analog_level();
-
-      stream_has_voice =
-          static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
-      if (vad_out_file != NULL) {
-        ASSERT_EQ(1u, fwrite(&stream_has_voice,
-                             sizeof(stream_has_voice),
-                             1,
-                             vad_out_file));
-      }
-
-      if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
-        ASSERT_EQ(capture_level_in, capture_level);
-      }
-
-      if (perf_testing) {
-        t1 = TickTime::Now();
-        TickInterval tick_diff = t1 - t0;
-        acc_ticks += tick_diff;
-        if (tick_diff.Microseconds() > max_time_us) {
-          max_time_us = tick_diff.Microseconds();
-        }
-        if (tick_diff.Microseconds() < min_time_us) {
-          min_time_us = tick_diff.Microseconds();
-        }
-      }
-
-      ASSERT_EQ(near_frame._payloadDataLengthInSamples,
-                fwrite(near_frame._payloadData,
-                       sizeof(WebRtc_Word16),
-                       near_frame._payloadDataLengthInSamples,
-                       out_file));
-    }
-    else {
-      FAIL() << "Event " << event << " is unrecognized";
-    }
-  }
-
-  if (verbose) {
-    printf("\nProcessed frames: %d (primary), %d (reverse)\n",
-        primary_count, reverse_count);
-  }
-
-  int8_t temp_int8;
-  if (far_file != NULL) {
-    read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
-    EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
-  }
-  read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
-  EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
-
-  if (!simulating) {
-    read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
-    EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
-    read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
-    EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
-    read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
-    EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
-  }
-
-  if (perf_testing) {
-    if (primary_count > 0) {
-      WebRtc_Word64 exec_time = acc_ticks.Milliseconds();
-      printf("\nTotal time: %.3f s, file time: %.2f s\n",
-        exec_time * 0.001, primary_count * 0.01);
-      printf("Time per frame: %.3f ms (average), %.3f ms (max),"
-             " %.3f ms (min)\n",
-          (exec_time * 1.0) / primary_count,
-          (max_time_us + max_time_reverse_us) / 1000.0,
-          (min_time_us + min_time_reverse_us) / 1000.0);
-    } else {
-      printf("Warning: no capture frames\n");
-    }
-  }
-
-  AudioProcessing::Destroy(apm);
-  apm = NULL;
-}
-
-int main(int argc, char* argv[])
-{
-  void_main(argc, argv);
-
-  return 0;
-}
diff --git a/src/modules/audio_processing/main/test/unit_test/Android.mk b/src/modules/audio_processing/main/test/unit_test/Android.mk
deleted file mode 100644
index b2029cf..0000000
--- a/src/modules/audio_processing/main/test/unit_test/Android.mk
+++ /dev/null
@@ -1,49 +0,0 @@
-#  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-#  Use of this source code is governed by a BSD-style license
-#  that can be found in the LICENSE file in the root of the source
-#  tree. An additional intellectual property rights grant can be found
-#  in the file PATENTS.  All contributing project authors may
-#  be found in the AUTHORS file in the root of the source tree.
-
-LOCAL_PATH:= $(call my-dir)
-
-# apm test app
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_TAGS := tests
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_SRC_FILES:= \
-    unit_test.cc
-
-# Flags passed to both C and C++ files.
-LOCAL_CFLAGS := \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR' \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-
-LOCAL_CPPFLAGS := 
-LOCAL_LDFLAGS :=
-LOCAL_C_INCLUDES := \
-    external/gtest/include \
-    $(LOCAL_PATH)/../../../../../system_wrappers/interface \
-    $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface \
-    $(LOCAL_PATH)/../../interface \
-    $(LOCAL_PATH)/../../../../interface \
-    $(LOCAL_PATH)/../../../../..
-
-LOCAL_STATIC_LIBRARIES := \
-    libgtest 
-
-LOCAL_SHARED_LIBRARIES := \
-    libutils \
-    libstlport \
-    libwebrtc_audio_preprocessing 
-
-LOCAL_MODULE:= webrtc_apm_unit_test
-
-include external/stlport/libstlport.mk
-include $(BUILD_EXECUTABLE)
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc
deleted file mode 100644
index c82ffdb..0000000
--- a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-
-#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
-#include "audio_processing_unittest.pb.h"
-
-#include <algorithm>
-
-#include <google/protobuf/stubs/once.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/wire_format_lite_inl.h>
-// @@protoc_insertion_point(includes)
-
-namespace audio_processing_unittest {
-
-void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto() {
-  delete Test::default_instance_;
-  delete Test_Statistic::default_instance_;
-  delete Test_EchoMetrics::default_instance_;
-  delete OutputData::default_instance_;
-}
-
-void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto() {
-  static bool already_here = false;
-  if (already_here) return;
-  already_here = true;
-  GOOGLE_PROTOBUF_VERIFY_VERSION;
-
-  Test::default_instance_ = new Test();
-  Test_Statistic::default_instance_ = new Test_Statistic();
-  Test_EchoMetrics::default_instance_ = new Test_EchoMetrics();
-  OutputData::default_instance_ = new OutputData();
-  Test::default_instance_->InitAsDefaultInstance();
-  Test_Statistic::default_instance_->InitAsDefaultInstance();
-  Test_EchoMetrics::default_instance_->InitAsDefaultInstance();
-  OutputData::default_instance_->InitAsDefaultInstance();
-  ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto);
-}
-
-// Force AddDescriptors() to be called at static initialization time.
-struct StaticDescriptorInitializer_audio_5fprocessing_5funittest_2eproto {
-  StaticDescriptorInitializer_audio_5fprocessing_5funittest_2eproto() {
-    protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-  }
-} static_descriptor_initializer_audio_5fprocessing_5funittest_2eproto_;
-
-
-// ===================================================================
-
-#ifndef _MSC_VER
-const int Test_Statistic::kInstantFieldNumber;
-const int Test_Statistic::kAverageFieldNumber;
-const int Test_Statistic::kMaximumFieldNumber;
-const int Test_Statistic::kMinimumFieldNumber;
-#endif  // !_MSC_VER
-
-Test_Statistic::Test_Statistic()
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-}
-
-void Test_Statistic::InitAsDefaultInstance() {
-}
-
-Test_Statistic::Test_Statistic(const Test_Statistic& from)
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-  MergeFrom(from);
-}
-
-void Test_Statistic::SharedCtor() {
-  _cached_size_ = 0;
-  instant_ = 0;
-  average_ = 0;
-  maximum_ = 0;
-  minimum_ = 0;
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-Test_Statistic::~Test_Statistic() {
-  SharedDtor();
-}
-
-void Test_Statistic::SharedDtor() {
-  if (this != default_instance_) {
-  }
-}
-
-void Test_Statistic::SetCachedSize(int size) const {
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-}
-const Test_Statistic& Test_Statistic::default_instance() {
-  if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();  return *default_instance_;
-}
-
-Test_Statistic* Test_Statistic::default_instance_ = NULL;
-
-Test_Statistic* Test_Statistic::New() const {
-  return new Test_Statistic;
-}
-
-void Test_Statistic::Clear() {
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    instant_ = 0;
-    average_ = 0;
-    maximum_ = 0;
-    minimum_ = 0;
-  }
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-bool Test_Statistic::MergePartialFromCodedStream(
-    ::google::protobuf::io::CodedInputStream* input) {
-#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
-  ::google::protobuf::uint32 tag;
-  while ((tag = input->ReadTag()) != 0) {
-    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // optional int32 instant = 1;
-      case 1: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &instant_)));
-          set_has_instant();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(16)) goto parse_average;
-        break;
-      }
-      
-      // optional int32 average = 2;
-      case 2: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_average:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &average_)));
-          set_has_average();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(24)) goto parse_maximum;
-        break;
-      }
-      
-      // optional int32 maximum = 3;
-      case 3: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_maximum:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &maximum_)));
-          set_has_maximum();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(32)) goto parse_minimum;
-        break;
-      }
-      
-      // optional int32 minimum = 4;
-      case 4: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_minimum:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &minimum_)));
-          set_has_minimum();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectAtEnd()) return true;
-        break;
-      }
-      
-      default: {
-      handle_uninterpreted:
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
-          return true;
-        }
-        DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
-        break;
-      }
-    }
-  }
-  return true;
-#undef DO_
-}
-
-void Test_Statistic::SerializeWithCachedSizes(
-    ::google::protobuf::io::CodedOutputStream* output) const {
-  // optional int32 instant = 1;
-  if (has_instant()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->instant(), output);
-  }
-  
-  // optional int32 average = 2;
-  if (has_average()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->average(), output);
-  }
-  
-  // optional int32 maximum = 3;
-  if (has_maximum()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->maximum(), output);
-  }
-  
-  // optional int32 minimum = 4;
-  if (has_minimum()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->minimum(), output);
-  }
-  
-}
-
-int Test_Statistic::ByteSize() const {
-  int total_size = 0;
-  
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    // optional int32 instant = 1;
-    if (has_instant()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->instant());
-    }
-    
-    // optional int32 average = 2;
-    if (has_average()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->average());
-    }
-    
-    // optional int32 maximum = 3;
-    if (has_maximum()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->maximum());
-    }
-    
-    // optional int32 minimum = 4;
-    if (has_minimum()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->minimum());
-    }
-    
-  }
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = total_size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-  return total_size;
-}
-
-void Test_Statistic::CheckTypeAndMergeFrom(
-    const ::google::protobuf::MessageLite& from) {
-  MergeFrom(*::google::protobuf::down_cast<const Test_Statistic*>(&from));
-}
-
-void Test_Statistic::MergeFrom(const Test_Statistic& from) {
-  GOOGLE_CHECK_NE(&from, this);
-  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    if (from.has_instant()) {
-      set_instant(from.instant());
-    }
-    if (from.has_average()) {
-      set_average(from.average());
-    }
-    if (from.has_maximum()) {
-      set_maximum(from.maximum());
-    }
-    if (from.has_minimum()) {
-      set_minimum(from.minimum());
-    }
-  }
-}
-
-void Test_Statistic::CopyFrom(const Test_Statistic& from) {
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-bool Test_Statistic::IsInitialized() const {
-  
-  return true;
-}
-
-void Test_Statistic::Swap(Test_Statistic* other) {
-  if (other != this) {
-    std::swap(instant_, other->instant_);
-    std::swap(average_, other->average_);
-    std::swap(maximum_, other->maximum_);
-    std::swap(minimum_, other->minimum_);
-    std::swap(_has_bits_[0], other->_has_bits_[0]);
-    std::swap(_cached_size_, other->_cached_size_);
-  }
-}
-
-::std::string Test_Statistic::GetTypeName() const {
-  return "audio_processing_unittest.Test.Statistic";
-}
-
-
-// -------------------------------------------------------------------
-
-#ifndef _MSC_VER
-const int Test_EchoMetrics::kResidualEchoReturnLossFieldNumber;
-const int Test_EchoMetrics::kEchoReturnLossFieldNumber;
-const int Test_EchoMetrics::kEchoReturnLossEnhancementFieldNumber;
-const int Test_EchoMetrics::kANlpFieldNumber;
-#endif  // !_MSC_VER
-
-Test_EchoMetrics::Test_EchoMetrics()
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-}
-
-void Test_EchoMetrics::InitAsDefaultInstance() {
-  residualechoreturnloss_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
-  echoreturnloss_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
-  echoreturnlossenhancement_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
-  anlp_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
-}
-
-Test_EchoMetrics::Test_EchoMetrics(const Test_EchoMetrics& from)
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-  MergeFrom(from);
-}
-
-void Test_EchoMetrics::SharedCtor() {
-  _cached_size_ = 0;
-  residualechoreturnloss_ = NULL;
-  echoreturnloss_ = NULL;
-  echoreturnlossenhancement_ = NULL;
-  anlp_ = NULL;
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-Test_EchoMetrics::~Test_EchoMetrics() {
-  SharedDtor();
-}
-
-void Test_EchoMetrics::SharedDtor() {
-  if (this != default_instance_) {
-    delete residualechoreturnloss_;
-    delete echoreturnloss_;
-    delete echoreturnlossenhancement_;
-    delete anlp_;
-  }
-}
-
-void Test_EchoMetrics::SetCachedSize(int size) const {
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-}
-const Test_EchoMetrics& Test_EchoMetrics::default_instance() {
-  if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();  return *default_instance_;
-}
-
-Test_EchoMetrics* Test_EchoMetrics::default_instance_ = NULL;
-
-Test_EchoMetrics* Test_EchoMetrics::New() const {
-  return new Test_EchoMetrics;
-}
-
-void Test_EchoMetrics::Clear() {
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    if (has_residualechoreturnloss()) {
-      if (residualechoreturnloss_ != NULL) residualechoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
-    }
-    if (has_echoreturnloss()) {
-      if (echoreturnloss_ != NULL) echoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
-    }
-    if (has_echoreturnlossenhancement()) {
-      if (echoreturnlossenhancement_ != NULL) echoreturnlossenhancement_->::audio_processing_unittest::Test_Statistic::Clear();
-    }
-    if (has_anlp()) {
-      if (anlp_ != NULL) anlp_->::audio_processing_unittest::Test_Statistic::Clear();
-    }
-  }
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-bool Test_EchoMetrics::MergePartialFromCodedStream(
-    ::google::protobuf::io::CodedInputStream* input) {
-#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
-  ::google::protobuf::uint32 tag;
-  while ((tag = input->ReadTag()) != 0) {
-    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
-      case 1: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_residualechoreturnloss()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(18)) goto parse_echoReturnLoss;
-        break;
-      }
-      
-      // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
-      case 2: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_echoReturnLoss:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_echoreturnloss()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(26)) goto parse_echoReturnLossEnhancement;
-        break;
-      }
-      
-      // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
-      case 3: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_echoReturnLossEnhancement:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_echoreturnlossenhancement()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(34)) goto parse_aNlp;
-        break;
-      }
-      
-      // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
-      case 4: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_aNlp:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_anlp()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectAtEnd()) return true;
-        break;
-      }
-      
-      default: {
-      handle_uninterpreted:
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
-          return true;
-        }
-        DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
-        break;
-      }
-    }
-  }
-  return true;
-#undef DO_
-}
-
-void Test_EchoMetrics::SerializeWithCachedSizes(
-    ::google::protobuf::io::CodedOutputStream* output) const {
-  // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
-  if (has_residualechoreturnloss()) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      1, this->residualechoreturnloss(), output);
-  }
-  
-  // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
-  if (has_echoreturnloss()) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      2, this->echoreturnloss(), output);
-  }
-  
-  // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
-  if (has_echoreturnlossenhancement()) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      3, this->echoreturnlossenhancement(), output);
-  }
-  
-  // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
-  if (has_anlp()) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      4, this->anlp(), output);
-  }
-  
-}
-
-int Test_EchoMetrics::ByteSize() const {
-  int total_size = 0;
-  
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
-    if (has_residualechoreturnloss()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->residualechoreturnloss());
-    }
-    
-    // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
-    if (has_echoreturnloss()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->echoreturnloss());
-    }
-    
-    // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
-    if (has_echoreturnlossenhancement()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->echoreturnlossenhancement());
-    }
-    
-    // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
-    if (has_anlp()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->anlp());
-    }
-    
-  }
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = total_size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-  return total_size;
-}
-
-void Test_EchoMetrics::CheckTypeAndMergeFrom(
-    const ::google::protobuf::MessageLite& from) {
-  MergeFrom(*::google::protobuf::down_cast<const Test_EchoMetrics*>(&from));
-}
-
-void Test_EchoMetrics::MergeFrom(const Test_EchoMetrics& from) {
-  GOOGLE_CHECK_NE(&from, this);
-  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    if (from.has_residualechoreturnloss()) {
-      mutable_residualechoreturnloss()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.residualechoreturnloss());
-    }
-    if (from.has_echoreturnloss()) {
-      mutable_echoreturnloss()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.echoreturnloss());
-    }
-    if (from.has_echoreturnlossenhancement()) {
-      mutable_echoreturnlossenhancement()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.echoreturnlossenhancement());
-    }
-    if (from.has_anlp()) {
-      mutable_anlp()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.anlp());
-    }
-  }
-}
-
-void Test_EchoMetrics::CopyFrom(const Test_EchoMetrics& from) {
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-bool Test_EchoMetrics::IsInitialized() const {
-  
-  return true;
-}
-
-void Test_EchoMetrics::Swap(Test_EchoMetrics* other) {
-  if (other != this) {
-    std::swap(residualechoreturnloss_, other->residualechoreturnloss_);
-    std::swap(echoreturnloss_, other->echoreturnloss_);
-    std::swap(echoreturnlossenhancement_, other->echoreturnlossenhancement_);
-    std::swap(anlp_, other->anlp_);
-    std::swap(_has_bits_[0], other->_has_bits_[0]);
-    std::swap(_cached_size_, other->_cached_size_);
-  }
-}
-
-::std::string Test_EchoMetrics::GetTypeName() const {
-  return "audio_processing_unittest.Test.EchoMetrics";
-}
-
-
-// -------------------------------------------------------------------
-
-#ifndef _MSC_VER
-const int Test::kNumReverseChannelsFieldNumber;
-const int Test::kNumChannelsFieldNumber;
-const int Test::kSampleRateFieldNumber;
-const int Test::kHasEchoCountFieldNumber;
-const int Test::kHasVoiceCountFieldNumber;
-const int Test::kIsSaturatedCountFieldNumber;
-const int Test::kEchoMetricsFieldNumber;
-#endif  // !_MSC_VER
-
-Test::Test()
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-}
-
-void Test::InitAsDefaultInstance() {
-  echometrics_ = const_cast< ::audio_processing_unittest::Test_EchoMetrics*>(&::audio_processing_unittest::Test_EchoMetrics::default_instance());
-}
-
-Test::Test(const Test& from)
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-  MergeFrom(from);
-}
-
-void Test::SharedCtor() {
-  _cached_size_ = 0;
-  numreversechannels_ = 0;
-  numchannels_ = 0;
-  samplerate_ = 0;
-  hasechocount_ = 0;
-  hasvoicecount_ = 0;
-  issaturatedcount_ = 0;
-  echometrics_ = NULL;
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-Test::~Test() {
-  SharedDtor();
-}
-
-void Test::SharedDtor() {
-  if (this != default_instance_) {
-    delete echometrics_;
-  }
-}
-
-void Test::SetCachedSize(int size) const {
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-}
-const Test& Test::default_instance() {
-  if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();  return *default_instance_;
-}
-
-Test* Test::default_instance_ = NULL;
-
-Test* Test::New() const {
-  return new Test;
-}
-
-void Test::Clear() {
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    numreversechannels_ = 0;
-    numchannels_ = 0;
-    samplerate_ = 0;
-    hasechocount_ = 0;
-    hasvoicecount_ = 0;
-    issaturatedcount_ = 0;
-    if (has_echometrics()) {
-      if (echometrics_ != NULL) echometrics_->::audio_processing_unittest::Test_EchoMetrics::Clear();
-    }
-  }
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-bool Test::MergePartialFromCodedStream(
-    ::google::protobuf::io::CodedInputStream* input) {
-#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
-  ::google::protobuf::uint32 tag;
-  while ((tag = input->ReadTag()) != 0) {
-    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // optional int32 numReverseChannels = 1;
-      case 1: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &numreversechannels_)));
-          set_has_numreversechannels();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(16)) goto parse_numChannels;
-        break;
-      }
-      
-      // optional int32 numChannels = 2;
-      case 2: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_numChannels:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &numchannels_)));
-          set_has_numchannels();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(24)) goto parse_sampleRate;
-        break;
-      }
-      
-      // optional int32 sampleRate = 3;
-      case 3: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_sampleRate:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &samplerate_)));
-          set_has_samplerate();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(32)) goto parse_hasEchoCount;
-        break;
-      }
-      
-      // optional int32 hasEchoCount = 4;
-      case 4: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_hasEchoCount:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &hasechocount_)));
-          set_has_hasechocount();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(40)) goto parse_hasVoiceCount;
-        break;
-      }
-      
-      // optional int32 hasVoiceCount = 5;
-      case 5: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_hasVoiceCount:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &hasvoicecount_)));
-          set_has_hasvoicecount();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(48)) goto parse_isSaturatedCount;
-        break;
-      }
-      
-      // optional int32 isSaturatedCount = 6;
-      case 6: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_isSaturatedCount:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &issaturatedcount_)));
-          set_has_issaturatedcount();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(58)) goto parse_echoMetrics;
-        break;
-      }
-      
-      // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
-      case 7: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_echoMetrics:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_echometrics()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectAtEnd()) return true;
-        break;
-      }
-      
-      default: {
-      handle_uninterpreted:
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
-          return true;
-        }
-        DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
-        break;
-      }
-    }
-  }
-  return true;
-#undef DO_
-}
-
-void Test::SerializeWithCachedSizes(
-    ::google::protobuf::io::CodedOutputStream* output) const {
-  // optional int32 numReverseChannels = 1;
-  if (has_numreversechannels()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->numreversechannels(), output);
-  }
-  
-  // optional int32 numChannels = 2;
-  if (has_numchannels()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->numchannels(), output);
-  }
-  
-  // optional int32 sampleRate = 3;
-  if (has_samplerate()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->samplerate(), output);
-  }
-  
-  // optional int32 hasEchoCount = 4;
-  if (has_hasechocount()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->hasechocount(), output);
-  }
-  
-  // optional int32 hasVoiceCount = 5;
-  if (has_hasvoicecount()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(5, this->hasvoicecount(), output);
-  }
-  
-  // optional int32 isSaturatedCount = 6;
-  if (has_issaturatedcount()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(6, this->issaturatedcount(), output);
-  }
-  
-  // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
-  if (has_echometrics()) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      7, this->echometrics(), output);
-  }
-  
-}
-
-int Test::ByteSize() const {
-  int total_size = 0;
-  
-  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    // optional int32 numReverseChannels = 1;
-    if (has_numreversechannels()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->numreversechannels());
-    }
-    
-    // optional int32 numChannels = 2;
-    if (has_numchannels()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->numchannels());
-    }
-    
-    // optional int32 sampleRate = 3;
-    if (has_samplerate()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->samplerate());
-    }
-    
-    // optional int32 hasEchoCount = 4;
-    if (has_hasechocount()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->hasechocount());
-    }
-    
-    // optional int32 hasVoiceCount = 5;
-    if (has_hasvoicecount()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->hasvoicecount());
-    }
-    
-    // optional int32 isSaturatedCount = 6;
-    if (has_issaturatedcount()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->issaturatedcount());
-    }
-    
-    // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
-    if (has_echometrics()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->echometrics());
-    }
-    
-  }
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = total_size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-  return total_size;
-}
-
-void Test::CheckTypeAndMergeFrom(
-    const ::google::protobuf::MessageLite& from) {
-  MergeFrom(*::google::protobuf::down_cast<const Test*>(&from));
-}
-
-void Test::MergeFrom(const Test& from) {
-  GOOGLE_CHECK_NE(&from, this);
-  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    if (from.has_numreversechannels()) {
-      set_numreversechannels(from.numreversechannels());
-    }
-    if (from.has_numchannels()) {
-      set_numchannels(from.numchannels());
-    }
-    if (from.has_samplerate()) {
-      set_samplerate(from.samplerate());
-    }
-    if (from.has_hasechocount()) {
-      set_hasechocount(from.hasechocount());
-    }
-    if (from.has_hasvoicecount()) {
-      set_hasvoicecount(from.hasvoicecount());
-    }
-    if (from.has_issaturatedcount()) {
-      set_issaturatedcount(from.issaturatedcount());
-    }
-    if (from.has_echometrics()) {
-      mutable_echometrics()->::audio_processing_unittest::Test_EchoMetrics::MergeFrom(from.echometrics());
-    }
-  }
-}
-
-void Test::CopyFrom(const Test& from) {
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-bool Test::IsInitialized() const {
-  
-  return true;
-}
-
-void Test::Swap(Test* other) {
-  if (other != this) {
-    std::swap(numreversechannels_, other->numreversechannels_);
-    std::swap(numchannels_, other->numchannels_);
-    std::swap(samplerate_, other->samplerate_);
-    std::swap(hasechocount_, other->hasechocount_);
-    std::swap(hasvoicecount_, other->hasvoicecount_);
-    std::swap(issaturatedcount_, other->issaturatedcount_);
-    std::swap(echometrics_, other->echometrics_);
-    std::swap(_has_bits_[0], other->_has_bits_[0]);
-    std::swap(_cached_size_, other->_cached_size_);
-  }
-}
-
-::std::string Test::GetTypeName() const {
-  return "audio_processing_unittest.Test";
-}
-
-
-// ===================================================================
-
-#ifndef _MSC_VER
-const int OutputData::kTestFieldNumber;
-#endif  // !_MSC_VER
-
-OutputData::OutputData()
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-}
-
-void OutputData::InitAsDefaultInstance() {
-}
-
-OutputData::OutputData(const OutputData& from)
-  : ::google::protobuf::MessageLite() {
-  SharedCtor();
-  MergeFrom(from);
-}
-
-void OutputData::SharedCtor() {
-  _cached_size_ = 0;
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-OutputData::~OutputData() {
-  SharedDtor();
-}
-
-void OutputData::SharedDtor() {
-  if (this != default_instance_) {
-  }
-}
-
-void OutputData::SetCachedSize(int size) const {
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-}
-const OutputData& OutputData::default_instance() {
-  if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();  return *default_instance_;
-}
-
-OutputData* OutputData::default_instance_ = NULL;
-
-OutputData* OutputData::New() const {
-  return new OutputData;
-}
-
-void OutputData::Clear() {
-  test_.Clear();
-  ::memset(_has_bits_, 0, sizeof(_has_bits_));
-}
-
-bool OutputData::MergePartialFromCodedStream(
-    ::google::protobuf::io::CodedInputStream* input) {
-#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
-  ::google::protobuf::uint32 tag;
-  while ((tag = input->ReadTag()) != 0) {
-    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // repeated .audio_processing_unittest.Test test = 1;
-      case 1: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_test:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-                input, add_test()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(10)) goto parse_test;
-        if (input->ExpectAtEnd()) return true;
-        break;
-      }
-      
-      default: {
-      handle_uninterpreted:
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
-          return true;
-        }
-        DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
-        break;
-      }
-    }
-  }
-  return true;
-#undef DO_
-}
-
-void OutputData::SerializeWithCachedSizes(
-    ::google::protobuf::io::CodedOutputStream* output) const {
-  // repeated .audio_processing_unittest.Test test = 1;
-  for (int i = 0; i < this->test_size(); i++) {
-    ::google::protobuf::internal::WireFormatLite::WriteMessage(
-      1, this->test(i), output);
-  }
-  
-}
-
-int OutputData::ByteSize() const {
-  int total_size = 0;
-  
-  // repeated .audio_processing_unittest.Test test = 1;
-  total_size += 1 * this->test_size();
-  for (int i = 0; i < this->test_size(); i++) {
-    total_size +=
-      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-        this->test(i));
-  }
-  
-  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
-  _cached_size_ = total_size;
-  GOOGLE_SAFE_CONCURRENT_WRITES_END();
-  return total_size;
-}
-
-void OutputData::CheckTypeAndMergeFrom(
-    const ::google::protobuf::MessageLite& from) {
-  MergeFrom(*::google::protobuf::down_cast<const OutputData*>(&from));
-}
-
-void OutputData::MergeFrom(const OutputData& from) {
-  GOOGLE_CHECK_NE(&from, this);
-  test_.MergeFrom(from.test_);
-}
-
-void OutputData::CopyFrom(const OutputData& from) {
-  if (&from == this) return;
-  Clear();
-  MergeFrom(from);
-}
-
-bool OutputData::IsInitialized() const {
-  
-  return true;
-}
-
-void OutputData::Swap(OutputData* other) {
-  if (other != this) {
-    test_.Swap(&other->test_);
-    std::swap(_has_bits_[0], other->_has_bits_[0]);
-    std::swap(_cached_size_, other->_cached_size_);
-  }
-}
-
-::std::string OutputData::GetTypeName() const {
-  return "audio_processing_unittest.OutputData";
-}
-
-
-// @@protoc_insertion_point(namespace_scope)
-
-}  // namespace audio_processing_unittest
-
-// @@protoc_insertion_point(global_scope)
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h
deleted file mode 100644
index 34c21b2..0000000
--- a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h
+++ /dev/null
@@ -1,862 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: audio_processing_unittest.proto
-
-#ifndef PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
-#define PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
-
-#include <string>
-
-#include <google/protobuf/stubs/common.h>
-
-#if GOOGLE_PROTOBUF_VERSION < 2004000
-#error This file was generated by a newer version of protoc which is
-#error incompatible with your Protocol Buffer headers.  Please update
-#error your headers.
-#endif
-#if 2004000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
-#error This file was generated by an older version of protoc which is
-#error incompatible with your Protocol Buffer headers.  Please
-#error regenerate this file with a newer version of protoc.
-#endif
-
-#include <google/protobuf/generated_message_util.h>
-#include <google/protobuf/repeated_field.h>
-#include <google/protobuf/extension_set.h>
-// @@protoc_insertion_point(includes)
-
-namespace audio_processing_unittest {
-
-// Internal implementation detail -- do not call these.
-void  protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
-void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
-
-class Test;
-class Test_Statistic;
-class Test_EchoMetrics;
-class OutputData;
-
-// ===================================================================
-
-class Test_Statistic : public ::google::protobuf::MessageLite {
- public:
-  Test_Statistic();
-  virtual ~Test_Statistic();
-  
-  Test_Statistic(const Test_Statistic& from);
-  
-  inline Test_Statistic& operator=(const Test_Statistic& from) {
-    CopyFrom(from);
-    return *this;
-  }
-  
-  static const Test_Statistic& default_instance();
-  
-  void Swap(Test_Statistic* other);
-  
-  // implements Message ----------------------------------------------
-  
-  Test_Statistic* New() const;
-  void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
-  void CopyFrom(const Test_Statistic& from);
-  void MergeFrom(const Test_Statistic& from);
-  void Clear();
-  bool IsInitialized() const;
-  
-  int ByteSize() const;
-  bool MergePartialFromCodedStream(
-      ::google::protobuf::io::CodedInputStream* input);
-  void SerializeWithCachedSizes(
-      ::google::protobuf::io::CodedOutputStream* output) const;
-  int GetCachedSize() const { return _cached_size_; }
-  private:
-  void SharedCtor();
-  void SharedDtor();
-  void SetCachedSize(int size) const;
-  public:
-  
-  ::std::string GetTypeName() const;
-  
-  // nested types ----------------------------------------------------
-  
-  // accessors -------------------------------------------------------
-  
-  // optional int32 instant = 1;
-  inline bool has_instant() const;
-  inline void clear_instant();
-  static const int kInstantFieldNumber = 1;
-  inline ::google::protobuf::int32 instant() const;
-  inline void set_instant(::google::protobuf::int32 value);
-  
-  // optional int32 average = 2;
-  inline bool has_average() const;
-  inline void clear_average();
-  static const int kAverageFieldNumber = 2;
-  inline ::google::protobuf::int32 average() const;
-  inline void set_average(::google::protobuf::int32 value);
-  
-  // optional int32 maximum = 3;
-  inline bool has_maximum() const;
-  inline void clear_maximum();
-  static const int kMaximumFieldNumber = 3;
-  inline ::google::protobuf::int32 maximum() const;
-  inline void set_maximum(::google::protobuf::int32 value);
-  
-  // optional int32 minimum = 4;
-  inline bool has_minimum() const;
-  inline void clear_minimum();
-  static const int kMinimumFieldNumber = 4;
-  inline ::google::protobuf::int32 minimum() const;
-  inline void set_minimum(::google::protobuf::int32 value);
-  
-  // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test.Statistic)
- private:
-  inline void set_has_instant();
-  inline void clear_has_instant();
-  inline void set_has_average();
-  inline void clear_has_average();
-  inline void set_has_maximum();
-  inline void clear_has_maximum();
-  inline void set_has_minimum();
-  inline void clear_has_minimum();
-  
-  ::google::protobuf::int32 instant_;
-  ::google::protobuf::int32 average_;
-  ::google::protobuf::int32 maximum_;
-  ::google::protobuf::int32 minimum_;
-  
-  mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
-  
-  friend void  protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
-  
-  void InitAsDefaultInstance();
-  static Test_Statistic* default_instance_;
-};
-// -------------------------------------------------------------------
-
-class Test_EchoMetrics : public ::google::protobuf::MessageLite {
- public:
-  Test_EchoMetrics();
-  virtual ~Test_EchoMetrics();
-  
-  Test_EchoMetrics(const Test_EchoMetrics& from);
-  
-  inline Test_EchoMetrics& operator=(const Test_EchoMetrics& from) {
-    CopyFrom(from);
-    return *this;
-  }
-  
-  static const Test_EchoMetrics& default_instance();
-  
-  void Swap(Test_EchoMetrics* other);
-  
-  // implements Message ----------------------------------------------
-  
-  Test_EchoMetrics* New() const;
-  void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
-  void CopyFrom(const Test_EchoMetrics& from);
-  void MergeFrom(const Test_EchoMetrics& from);
-  void Clear();
-  bool IsInitialized() const;
-  
-  int ByteSize() const;
-  bool MergePartialFromCodedStream(
-      ::google::protobuf::io::CodedInputStream* input);
-  void SerializeWithCachedSizes(
-      ::google::protobuf::io::CodedOutputStream* output) const;
-  int GetCachedSize() const { return _cached_size_; }
-  private:
-  void SharedCtor();
-  void SharedDtor();
-  void SetCachedSize(int size) const;
-  public:
-  
-  ::std::string GetTypeName() const;
-  
-  // nested types ----------------------------------------------------
-  
-  // accessors -------------------------------------------------------
-  
-  // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
-  inline bool has_residualechoreturnloss() const;
-  inline void clear_residualechoreturnloss();
-  static const int kResidualEchoReturnLossFieldNumber = 1;
-  inline const ::audio_processing_unittest::Test_Statistic& residualechoreturnloss() const;
-  inline ::audio_processing_unittest::Test_Statistic* mutable_residualechoreturnloss();
-  inline ::audio_processing_unittest::Test_Statistic* release_residualechoreturnloss();
-  
-  // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
-  inline bool has_echoreturnloss() const;
-  inline void clear_echoreturnloss();
-  static const int kEchoReturnLossFieldNumber = 2;
-  inline const ::audio_processing_unittest::Test_Statistic& echoreturnloss() const;
-  inline ::audio_processing_unittest::Test_Statistic* mutable_echoreturnloss();
-  inline ::audio_processing_unittest::Test_Statistic* release_echoreturnloss();
-  
-  // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
-  inline bool has_echoreturnlossenhancement() const;
-  inline void clear_echoreturnlossenhancement();
-  static const int kEchoReturnLossEnhancementFieldNumber = 3;
-  inline const ::audio_processing_unittest::Test_Statistic& echoreturnlossenhancement() const;
-  inline ::audio_processing_unittest::Test_Statistic* mutable_echoreturnlossenhancement();
-  inline ::audio_processing_unittest::Test_Statistic* release_echoreturnlossenhancement();
-  
-  // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
-  inline bool has_anlp() const;
-  inline void clear_anlp();
-  static const int kANlpFieldNumber = 4;
-  inline const ::audio_processing_unittest::Test_Statistic& anlp() const;
-  inline ::audio_processing_unittest::Test_Statistic* mutable_anlp();
-  inline ::audio_processing_unittest::Test_Statistic* release_anlp();
-  
-  // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test.EchoMetrics)
- private:
-  inline void set_has_residualechoreturnloss();
-  inline void clear_has_residualechoreturnloss();
-  inline void set_has_echoreturnloss();
-  inline void clear_has_echoreturnloss();
-  inline void set_has_echoreturnlossenhancement();
-  inline void clear_has_echoreturnlossenhancement();
-  inline void set_has_anlp();
-  inline void clear_has_anlp();
-  
-  ::audio_processing_unittest::Test_Statistic* residualechoreturnloss_;
-  ::audio_processing_unittest::Test_Statistic* echoreturnloss_;
-  ::audio_processing_unittest::Test_Statistic* echoreturnlossenhancement_;
-  ::audio_processing_unittest::Test_Statistic* anlp_;
-  
-  mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
-  
-  friend void  protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
-  
-  void InitAsDefaultInstance();
-  static Test_EchoMetrics* default_instance_;
-};
-// -------------------------------------------------------------------
-
-class Test : public ::google::protobuf::MessageLite {
- public:
-  Test();
-  virtual ~Test();
-  
-  Test(const Test& from);
-  
-  inline Test& operator=(const Test& from) {
-    CopyFrom(from);
-    return *this;
-  }
-  
-  static const Test& default_instance();
-  
-  void Swap(Test* other);
-  
-  // implements Message ----------------------------------------------
-  
-  Test* New() const;
-  void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
-  void CopyFrom(const Test& from);
-  void MergeFrom(const Test& from);
-  void Clear();
-  bool IsInitialized() const;
-  
-  int ByteSize() const;
-  bool MergePartialFromCodedStream(
-      ::google::protobuf::io::CodedInputStream* input);
-  void SerializeWithCachedSizes(
-      ::google::protobuf::io::CodedOutputStream* output) const;
-  int GetCachedSize() const { return _cached_size_; }
-  private:
-  void SharedCtor();
-  void SharedDtor();
-  void SetCachedSize(int size) const;
-  public:
-  
-  ::std::string GetTypeName() const;
-  
-  // nested types ----------------------------------------------------
-  
-  typedef Test_Statistic Statistic;
-  typedef Test_EchoMetrics EchoMetrics;
-  
-  // accessors -------------------------------------------------------
-  
-  // optional int32 numReverseChannels = 1;
-  inline bool has_numreversechannels() const;
-  inline void clear_numreversechannels();
-  static const int kNumReverseChannelsFieldNumber = 1;
-  inline ::google::protobuf::int32 numreversechannels() const;
-  inline void set_numreversechannels(::google::protobuf::int32 value);
-  
-  // optional int32 numChannels = 2;
-  inline bool has_numchannels() const;
-  inline void clear_numchannels();
-  static const int kNumChannelsFieldNumber = 2;
-  inline ::google::protobuf::int32 numchannels() const;
-  inline void set_numchannels(::google::protobuf::int32 value);
-  
-  // optional int32 sampleRate = 3;
-  inline bool has_samplerate() const;
-  inline void clear_samplerate();
-  static const int kSampleRateFieldNumber = 3;
-  inline ::google::protobuf::int32 samplerate() const;
-  inline void set_samplerate(::google::protobuf::int32 value);
-  
-  // optional int32 hasEchoCount = 4;
-  inline bool has_hasechocount() const;
-  inline void clear_hasechocount();
-  static const int kHasEchoCountFieldNumber = 4;
-  inline ::google::protobuf::int32 hasechocount() const;
-  inline void set_hasechocount(::google::protobuf::int32 value);
-  
-  // optional int32 hasVoiceCount = 5;
-  inline bool has_hasvoicecount() const;
-  inline void clear_hasvoicecount();
-  static const int kHasVoiceCountFieldNumber = 5;
-  inline ::google::protobuf::int32 hasvoicecount() const;
-  inline void set_hasvoicecount(::google::protobuf::int32 value);
-  
-  // optional int32 isSaturatedCount = 6;
-  inline bool has_issaturatedcount() const;
-  inline void clear_issaturatedcount();
-  static const int kIsSaturatedCountFieldNumber = 6;
-  inline ::google::protobuf::int32 issaturatedcount() const;
-  inline void set_issaturatedcount(::google::protobuf::int32 value);
-  
-  // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
-  inline bool has_echometrics() const;
-  inline void clear_echometrics();
-  static const int kEchoMetricsFieldNumber = 7;
-  inline const ::audio_processing_unittest::Test_EchoMetrics& echometrics() const;
-  inline ::audio_processing_unittest::Test_EchoMetrics* mutable_echometrics();
-  inline ::audio_processing_unittest::Test_EchoMetrics* release_echometrics();
-  
-  // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test)
- private:
-  inline void set_has_numreversechannels();
-  inline void clear_has_numreversechannels();
-  inline void set_has_numchannels();
-  inline void clear_has_numchannels();
-  inline void set_has_samplerate();
-  inline void clear_has_samplerate();
-  inline void set_has_hasechocount();
-  inline void clear_has_hasechocount();
-  inline void set_has_hasvoicecount();
-  inline void clear_has_hasvoicecount();
-  inline void set_has_issaturatedcount();
-  inline void clear_has_issaturatedcount();
-  inline void set_has_echometrics();
-  inline void clear_has_echometrics();
-  
-  ::google::protobuf::int32 numreversechannels_;
-  ::google::protobuf::int32 numchannels_;
-  ::google::protobuf::int32 samplerate_;
-  ::google::protobuf::int32 hasechocount_;
-  ::google::protobuf::int32 hasvoicecount_;
-  ::google::protobuf::int32 issaturatedcount_;
-  ::audio_processing_unittest::Test_EchoMetrics* echometrics_;
-  
-  mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
-  
-  friend void  protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
-  
-  void InitAsDefaultInstance();
-  static Test* default_instance_;
-};
-// -------------------------------------------------------------------
-
-class OutputData : public ::google::protobuf::MessageLite {
- public:
-  OutputData();
-  virtual ~OutputData();
-  
-  OutputData(const OutputData& from);
-  
-  inline OutputData& operator=(const OutputData& from) {
-    CopyFrom(from);
-    return *this;
-  }
-  
-  static const OutputData& default_instance();
-  
-  void Swap(OutputData* other);
-  
-  // implements Message ----------------------------------------------
-  
-  OutputData* New() const;
-  void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
-  void CopyFrom(const OutputData& from);
-  void MergeFrom(const OutputData& from);
-  void Clear();
-  bool IsInitialized() const;
-  
-  int ByteSize() const;
-  bool MergePartialFromCodedStream(
-      ::google::protobuf::io::CodedInputStream* input);
-  void SerializeWithCachedSizes(
-      ::google::protobuf::io::CodedOutputStream* output) const;
-  int GetCachedSize() const { return _cached_size_; }
-  private:
-  void SharedCtor();
-  void SharedDtor();
-  void SetCachedSize(int size) const;
-  public:
-  
-  ::std::string GetTypeName() const;
-  
-  // nested types ----------------------------------------------------
-  
-  // accessors -------------------------------------------------------
-  
-  // repeated .audio_processing_unittest.Test test = 1;
-  inline int test_size() const;
-  inline void clear_test();
-  static const int kTestFieldNumber = 1;
-  inline const ::audio_processing_unittest::Test& test(int index) const;
-  inline ::audio_processing_unittest::Test* mutable_test(int index);
-  inline ::audio_processing_unittest::Test* add_test();
-  inline const ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >&
-      test() const;
-  inline ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >*
-      mutable_test();
-  
-  // @@protoc_insertion_point(class_scope:audio_processing_unittest.OutputData)
- private:
-  
-  ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test > test_;
-  
-  mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(1 + 31) / 32];
-  
-  friend void  protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
-  friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
-  
-  void InitAsDefaultInstance();
-  static OutputData* default_instance_;
-};
-// ===================================================================
-
-
-// ===================================================================
-
-// Test_Statistic
-
-// optional int32 instant = 1;
-inline bool Test_Statistic::has_instant() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void Test_Statistic::set_has_instant() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void Test_Statistic::clear_has_instant() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void Test_Statistic::clear_instant() {
-  instant_ = 0;
-  clear_has_instant();
-}
-inline ::google::protobuf::int32 Test_Statistic::instant() const {
-  return instant_;
-}
-inline void Test_Statistic::set_instant(::google::protobuf::int32 value) {
-  set_has_instant();
-  instant_ = value;
-}
-
-// optional int32 average = 2;
-inline bool Test_Statistic::has_average() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void Test_Statistic::set_has_average() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void Test_Statistic::clear_has_average() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void Test_Statistic::clear_average() {
-  average_ = 0;
-  clear_has_average();
-}
-inline ::google::protobuf::int32 Test_Statistic::average() const {
-  return average_;
-}
-inline void Test_Statistic::set_average(::google::protobuf::int32 value) {
-  set_has_average();
-  average_ = value;
-}
-
-// optional int32 maximum = 3;
-inline bool Test_Statistic::has_maximum() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
-}
-inline void Test_Statistic::set_has_maximum() {
-  _has_bits_[0] |= 0x00000004u;
-}
-inline void Test_Statistic::clear_has_maximum() {
-  _has_bits_[0] &= ~0x00000004u;
-}
-inline void Test_Statistic::clear_maximum() {
-  maximum_ = 0;
-  clear_has_maximum();
-}
-inline ::google::protobuf::int32 Test_Statistic::maximum() const {
-  return maximum_;
-}
-inline void Test_Statistic::set_maximum(::google::protobuf::int32 value) {
-  set_has_maximum();
-  maximum_ = value;
-}
-
-// optional int32 minimum = 4;
-inline bool Test_Statistic::has_minimum() const {
-  return (_has_bits_[0] & 0x00000008u) != 0;
-}
-inline void Test_Statistic::set_has_minimum() {
-  _has_bits_[0] |= 0x00000008u;
-}
-inline void Test_Statistic::clear_has_minimum() {
-  _has_bits_[0] &= ~0x00000008u;
-}
-inline void Test_Statistic::clear_minimum() {
-  minimum_ = 0;
-  clear_has_minimum();
-}
-inline ::google::protobuf::int32 Test_Statistic::minimum() const {
-  return minimum_;
-}
-inline void Test_Statistic::set_minimum(::google::protobuf::int32 value) {
-  set_has_minimum();
-  minimum_ = value;
-}
-
-// -------------------------------------------------------------------
-
-// Test_EchoMetrics
-
-// optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
-inline bool Test_EchoMetrics::has_residualechoreturnloss() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void Test_EchoMetrics::set_has_residualechoreturnloss() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void Test_EchoMetrics::clear_has_residualechoreturnloss() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void Test_EchoMetrics::clear_residualechoreturnloss() {
-  if (residualechoreturnloss_ != NULL) residualechoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
-  clear_has_residualechoreturnloss();
-}
-inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::residualechoreturnloss() const {
-  return residualechoreturnloss_ != NULL ? *residualechoreturnloss_ : *default_instance_->residualechoreturnloss_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_residualechoreturnloss() {
-  set_has_residualechoreturnloss();
-  if (residualechoreturnloss_ == NULL) residualechoreturnloss_ = new ::audio_processing_unittest::Test_Statistic;
-  return residualechoreturnloss_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_residualechoreturnloss() {
-  clear_has_residualechoreturnloss();
-  ::audio_processing_unittest::Test_Statistic* temp = residualechoreturnloss_;
-  residualechoreturnloss_ = NULL;
-  return temp;
-}
-
-// optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
-inline bool Test_EchoMetrics::has_echoreturnloss() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void Test_EchoMetrics::set_has_echoreturnloss() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void Test_EchoMetrics::clear_has_echoreturnloss() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void Test_EchoMetrics::clear_echoreturnloss() {
-  if (echoreturnloss_ != NULL) echoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
-  clear_has_echoreturnloss();
-}
-inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::echoreturnloss() const {
-  return echoreturnloss_ != NULL ? *echoreturnloss_ : *default_instance_->echoreturnloss_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_echoreturnloss() {
-  set_has_echoreturnloss();
-  if (echoreturnloss_ == NULL) echoreturnloss_ = new ::audio_processing_unittest::Test_Statistic;
-  return echoreturnloss_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_echoreturnloss() {
-  clear_has_echoreturnloss();
-  ::audio_processing_unittest::Test_Statistic* temp = echoreturnloss_;
-  echoreturnloss_ = NULL;
-  return temp;
-}
-
-// optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
-inline bool Test_EchoMetrics::has_echoreturnlossenhancement() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
-}
-inline void Test_EchoMetrics::set_has_echoreturnlossenhancement() {
-  _has_bits_[0] |= 0x00000004u;
-}
-inline void Test_EchoMetrics::clear_has_echoreturnlossenhancement() {
-  _has_bits_[0] &= ~0x00000004u;
-}
-inline void Test_EchoMetrics::clear_echoreturnlossenhancement() {
-  if (echoreturnlossenhancement_ != NULL) echoreturnlossenhancement_->::audio_processing_unittest::Test_Statistic::Clear();
-  clear_has_echoreturnlossenhancement();
-}
-inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::echoreturnlossenhancement() const {
-  return echoreturnlossenhancement_ != NULL ? *echoreturnlossenhancement_ : *default_instance_->echoreturnlossenhancement_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_echoreturnlossenhancement() {
-  set_has_echoreturnlossenhancement();
-  if (echoreturnlossenhancement_ == NULL) echoreturnlossenhancement_ = new ::audio_processing_unittest::Test_Statistic;
-  return echoreturnlossenhancement_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_echoreturnlossenhancement() {
-  clear_has_echoreturnlossenhancement();
-  ::audio_processing_unittest::Test_Statistic* temp = echoreturnlossenhancement_;
-  echoreturnlossenhancement_ = NULL;
-  return temp;
-}
-
-// optional .audio_processing_unittest.Test.Statistic aNlp = 4;
-inline bool Test_EchoMetrics::has_anlp() const {
-  return (_has_bits_[0] & 0x00000008u) != 0;
-}
-inline void Test_EchoMetrics::set_has_anlp() {
-  _has_bits_[0] |= 0x00000008u;
-}
-inline void Test_EchoMetrics::clear_has_anlp() {
-  _has_bits_[0] &= ~0x00000008u;
-}
-inline void Test_EchoMetrics::clear_anlp() {
-  if (anlp_ != NULL) anlp_->::audio_processing_unittest::Test_Statistic::Clear();
-  clear_has_anlp();
-}
-inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::anlp() const {
-  return anlp_ != NULL ? *anlp_ : *default_instance_->anlp_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_anlp() {
-  set_has_anlp();
-  if (anlp_ == NULL) anlp_ = new ::audio_processing_unittest::Test_Statistic;
-  return anlp_;
-}
-inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_anlp() {
-  clear_has_anlp();
-  ::audio_processing_unittest::Test_Statistic* temp = anlp_;
-  anlp_ = NULL;
-  return temp;
-}
-
-// -------------------------------------------------------------------
-
-// Test
-
-// optional int32 numReverseChannels = 1;
-inline bool Test::has_numreversechannels() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void Test::set_has_numreversechannels() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void Test::clear_has_numreversechannels() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void Test::clear_numreversechannels() {
-  numreversechannels_ = 0;
-  clear_has_numreversechannels();
-}
-inline ::google::protobuf::int32 Test::numreversechannels() const {
-  return numreversechannels_;
-}
-inline void Test::set_numreversechannels(::google::protobuf::int32 value) {
-  set_has_numreversechannels();
-  numreversechannels_ = value;
-}
-
-// optional int32 numChannels = 2;
-inline bool Test::has_numchannels() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void Test::set_has_numchannels() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void Test::clear_has_numchannels() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void Test::clear_numchannels() {
-  numchannels_ = 0;
-  clear_has_numchannels();
-}
-inline ::google::protobuf::int32 Test::numchannels() const {
-  return numchannels_;
-}
-inline void Test::set_numchannels(::google::protobuf::int32 value) {
-  set_has_numchannels();
-  numchannels_ = value;
-}
-
-// optional int32 sampleRate = 3;
-inline bool Test::has_samplerate() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
-}
-inline void Test::set_has_samplerate() {
-  _has_bits_[0] |= 0x00000004u;
-}
-inline void Test::clear_has_samplerate() {
-  _has_bits_[0] &= ~0x00000004u;
-}
-inline void Test::clear_samplerate() {
-  samplerate_ = 0;
-  clear_has_samplerate();
-}
-inline ::google::protobuf::int32 Test::samplerate() const {
-  return samplerate_;
-}
-inline void Test::set_samplerate(::google::protobuf::int32 value) {
-  set_has_samplerate();
-  samplerate_ = value;
-}
-
-// optional int32 hasEchoCount = 4;
-inline bool Test::has_hasechocount() const {
-  return (_has_bits_[0] & 0x00000008u) != 0;
-}
-inline void Test::set_has_hasechocount() {
-  _has_bits_[0] |= 0x00000008u;
-}
-inline void Test::clear_has_hasechocount() {
-  _has_bits_[0] &= ~0x00000008u;
-}
-inline void Test::clear_hasechocount() {
-  hasechocount_ = 0;
-  clear_has_hasechocount();
-}
-inline ::google::protobuf::int32 Test::hasechocount() const {
-  return hasechocount_;
-}
-inline void Test::set_hasechocount(::google::protobuf::int32 value) {
-  set_has_hasechocount();
-  hasechocount_ = value;
-}
-
-// optional int32 hasVoiceCount = 5;
-inline bool Test::has_hasvoicecount() const {
-  return (_has_bits_[0] & 0x00000010u) != 0;
-}
-inline void Test::set_has_hasvoicecount() {
-  _has_bits_[0] |= 0x00000010u;
-}
-inline void Test::clear_has_hasvoicecount() {
-  _has_bits_[0] &= ~0x00000010u;
-}
-inline void Test::clear_hasvoicecount() {
-  hasvoicecount_ = 0;
-  clear_has_hasvoicecount();
-}
-inline ::google::protobuf::int32 Test::hasvoicecount() const {
-  return hasvoicecount_;
-}
-inline void Test::set_hasvoicecount(::google::protobuf::int32 value) {
-  set_has_hasvoicecount();
-  hasvoicecount_ = value;
-}
-
-// optional int32 isSaturatedCount = 6;
-inline bool Test::has_issaturatedcount() const {
-  return (_has_bits_[0] & 0x00000020u) != 0;
-}
-inline void Test::set_has_issaturatedcount() {
-  _has_bits_[0] |= 0x00000020u;
-}
-inline void Test::clear_has_issaturatedcount() {
-  _has_bits_[0] &= ~0x00000020u;
-}
-inline void Test::clear_issaturatedcount() {
-  issaturatedcount_ = 0;
-  clear_has_issaturatedcount();
-}
-inline ::google::protobuf::int32 Test::issaturatedcount() const {
-  return issaturatedcount_;
-}
-inline void Test::set_issaturatedcount(::google::protobuf::int32 value) {
-  set_has_issaturatedcount();
-  issaturatedcount_ = value;
-}
-
-// optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
-inline bool Test::has_echometrics() const {
-  return (_has_bits_[0] & 0x00000040u) != 0;
-}
-inline void Test::set_has_echometrics() {
-  _has_bits_[0] |= 0x00000040u;
-}
-inline void Test::clear_has_echometrics() {
-  _has_bits_[0] &= ~0x00000040u;
-}
-inline void Test::clear_echometrics() {
-  if (echometrics_ != NULL) echometrics_->::audio_processing_unittest::Test_EchoMetrics::Clear();
-  clear_has_echometrics();
-}
-inline const ::audio_processing_unittest::Test_EchoMetrics& Test::echometrics() const {
-  return echometrics_ != NULL ? *echometrics_ : *default_instance_->echometrics_;
-}
-inline ::audio_processing_unittest::Test_EchoMetrics* Test::mutable_echometrics() {
-  set_has_echometrics();
-  if (echometrics_ == NULL) echometrics_ = new ::audio_processing_unittest::Test_EchoMetrics;
-  return echometrics_;
-}
-inline ::audio_processing_unittest::Test_EchoMetrics* Test::release_echometrics() {
-  clear_has_echometrics();
-  ::audio_processing_unittest::Test_EchoMetrics* temp = echometrics_;
-  echometrics_ = NULL;
-  return temp;
-}
-
-// -------------------------------------------------------------------
-
-// OutputData
-
-// repeated .audio_processing_unittest.Test test = 1;
-inline int OutputData::test_size() const {
-  return test_.size();
-}
-inline void OutputData::clear_test() {
-  test_.Clear();
-}
-inline const ::audio_processing_unittest::Test& OutputData::test(int index) const {
-  return test_.Get(index);
-}
-inline ::audio_processing_unittest::Test* OutputData::mutable_test(int index) {
-  return test_.Mutable(index);
-}
-inline ::audio_processing_unittest::Test* OutputData::add_test() {
-  return test_.Add();
-}
-inline const ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >&
-OutputData::test() const {
-  return test_;
-}
-inline ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >*
-OutputData::mutable_test() {
-  return &test_;
-}
-
-
-// @@protoc_insertion_point(namespace_scope)
-
-}  // namespace audio_processing_unittest
-
-// @@protoc_insertion_point(global_scope)
-
-#endif  // PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto
deleted file mode 100644
index 8520e64..0000000
--- a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-package audio_processing_unittest;
-option optimize_for = LITE_RUNTIME;
-
-message Test {
-  optional int32 numReverseChannels = 1;
-  optional int32 numChannels = 2;
-  optional int32 sampleRate = 3;
-
-  optional int32 hasEchoCount = 4;
-  optional int32 hasVoiceCount = 5;
-  optional int32 isSaturatedCount = 6;
-
-  message Statistic {
-    optional int32 instant = 1;
-    optional int32 average = 2;
-    optional int32 maximum = 3;
-    optional int32 minimum = 4;
-  }
-
-  message EchoMetrics {
-    optional Statistic residualEchoReturnLoss = 1;
-    optional Statistic echoReturnLoss = 2;
-    optional Statistic echoReturnLossEnhancement = 3;
-    optional Statistic aNlp = 4;
-  }
-
-  optional EchoMetrics echoMetrics = 7;
-}
-
-message OutputData {
-  repeated Test test = 1;
-}
-
diff --git a/src/modules/audio_processing/main/test/unit_test/unit_test.cc b/src/modules/audio_processing/main/test/unit_test/unit_test.cc
deleted file mode 100644
index 3a6fce5..0000000
--- a/src/modules/audio_processing/main/test/unit_test/unit_test.cc
+++ /dev/null
@@ -1,881 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <cstdio>
-
-#include <gtest/gtest.h>
-
-#include "audio_processing.h"
-#include "audio_processing_unittest.pb.h"
-#include "event_wrapper.h"
-#include "module_common_types.h"
-#include "thread_wrapper.h"
-#include "trace.h"
-#include "signal_processing_library.h"
-
-using webrtc::AudioProcessing;
-using webrtc::AudioFrame;
-using webrtc::GainControl;
-using webrtc::NoiseSuppression;
-using webrtc::EchoCancellation;
-using webrtc::EventWrapper;
-using webrtc::Trace;
-using webrtc::LevelEstimator;
-using webrtc::EchoCancellation;
-using webrtc::EchoControlMobile;
-using webrtc::VoiceDetection;
-
-namespace {
-// When true, this will compare the output data with the results stored to
-// file. This is the typical case. When the file should be updated, it can
-// be set to false with the command-line switch --write_output_data.
-bool global_read_output_data = true;
-
-class ApmEnvironment : public ::testing::Environment {
- public:
-  virtual void SetUp() {
-    Trace::CreateTrace();
-    ASSERT_EQ(0, Trace::SetTraceFile("apm_trace.txt"));
-  }
-
-  virtual void TearDown() {
-    Trace::ReturnTrace();
-  }
-};
-
-class ApmTest : public ::testing::Test {
- protected:
-  ApmTest();
-  virtual void SetUp();
-  virtual void TearDown();
-
-  webrtc::AudioProcessing* apm_;
-  webrtc::AudioFrame* frame_;
-  webrtc::AudioFrame* revframe_;
-  FILE* far_file_;
-  FILE* near_file_;
-  bool update_output_data_;
-};
-
-ApmTest::ApmTest()
-    : apm_(NULL),
-      far_file_(NULL),
-      near_file_(NULL),
-      frame_(NULL),
-      revframe_(NULL) {}
-
-void ApmTest::SetUp() {
-  apm_ = AudioProcessing::Create(0);
-  ASSERT_TRUE(apm_ != NULL);
-
-  frame_ = new AudioFrame();
-  revframe_ = new AudioFrame();
-
-  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
-
-  frame_->_payloadDataLengthInSamples = 320;
-  frame_->_audioChannel = 2;
-  frame_->_frequencyInHz = 32000;
-  revframe_->_payloadDataLengthInSamples = 320;
-  revframe_->_audioChannel = 2;
-  revframe_->_frequencyInHz = 32000;
-
-  far_file_ = fopen("aec_far.pcm", "rb");
-  ASSERT_TRUE(far_file_ != NULL) << "Could not open input file aec_far.pcm\n";
-  near_file_ = fopen("aec_near.pcm", "rb");
-  ASSERT_TRUE(near_file_ != NULL) << "Could not open input file aec_near.pcm\n";
-}
-
-void ApmTest::TearDown() {
-  if (frame_) {
-    delete frame_;
-  }
-  frame_ = NULL;
-
-  if (revframe_) {
-    delete revframe_;
-  }
-  revframe_ = NULL;
-
-  if (far_file_) {
-    ASSERT_EQ(0, fclose(far_file_));
-  }
-  far_file_ = NULL;
-
-  if (near_file_) {
-    ASSERT_EQ(0, fclose(near_file_));
-  }
-  near_file_ = NULL;
-
-  if (apm_ != NULL) {
-    AudioProcessing::Destroy(apm_);
-  }
-  apm_ = NULL;
-}
-
-void MixStereoToMono(WebRtc_Word16* stereo,
-                     WebRtc_Word16* mono,
-                     int numSamples) {
-  for (int i = 0; i < numSamples; i++) {
-    int int32 = (static_cast<int>(stereo[i * 2]) +
-                 static_cast<int>(stereo[i * 2 + 1])) >> 1;
-    mono[i] = static_cast<WebRtc_Word16>(int32);
-  }
-}
-
-void WriteMessageLiteToFile(const char* filename,
-                            const ::google::protobuf::MessageLite& message) {
-  assert(filename != NULL);
-
-  FILE* file = fopen(filename, "wb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = message.ByteSize();
-  ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_TRUE(message.SerializeToArray(array, size));
-
-  ASSERT_EQ(1, fwrite(&size, sizeof(int), 1, file));
-  ASSERT_EQ(size, fwrite(array, sizeof(unsigned char), size, file));
-
-  delete [] array;
-  fclose(file);
-}
-
-void ReadMessageLiteFromFile(const char* filename,
-                             ::google::protobuf::MessageLite* message) {
-  assert(filename != NULL);
-  assert(message != NULL);
-
-  FILE* file = fopen(filename, "rb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = 0;
-  ASSERT_EQ(1, fread(&size, sizeof(int), 1, file));
-  ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_EQ(size, fread(array, sizeof(unsigned char), size, file));
-
-  ASSERT_TRUE(message->ParseFromArray(array, size));
-
-  delete [] array;
-  fclose(file);
-}
-
-struct ThreadData {
-  ThreadData(int thread_num_, AudioProcessing* ap_)
-      : thread_num(thread_num_),
-        error(false),
-        ap(ap_) {}
-  int thread_num;
-  bool error;
-  AudioProcessing* ap;
-};
-
-// Don't use GTest here; non-thread-safe on Windows (as of 1.5.0).
-bool DeadlockProc(void* thread_object) {
-  ThreadData* thread_data = static_cast<ThreadData*>(thread_object);
-  AudioProcessing* ap = thread_data->ap;
-  int err = ap->kNoError;
-
-  AudioFrame primary_frame;
-  AudioFrame reverse_frame;
-  primary_frame._payloadDataLengthInSamples = 320;
-  primary_frame._audioChannel = 2;
-  primary_frame._frequencyInHz = 32000;
-  reverse_frame._payloadDataLengthInSamples = 320;
-  reverse_frame._audioChannel = 2;
-  reverse_frame._frequencyInHz = 32000;
-
-  ap->echo_cancellation()->Enable(true);
-  ap->gain_control()->Enable(true);
-  ap->high_pass_filter()->Enable(true);
-  ap->level_estimator()->Enable(true);
-  ap->noise_suppression()->Enable(true);
-  ap->voice_detection()->Enable(true);
-
-  if (thread_data->thread_num % 2 == 0) {
-    err = ap->AnalyzeReverseStream(&reverse_frame);
-    if (err != ap->kNoError) {
-      printf("Error in AnalyzeReverseStream(): %d\n", err);
-      thread_data->error = true;
-      return false;
-    }
-  }
-
-  if (thread_data->thread_num % 2 == 1) {
-    ap->set_stream_delay_ms(0);
-    ap->echo_cancellation()->set_stream_drift_samples(0);
-    ap->gain_control()->set_stream_analog_level(0);
-    err = ap->ProcessStream(&primary_frame);
-    if (err == ap->kStreamParameterNotSetError) {
-      printf("Expected kStreamParameterNotSetError in ProcessStream(): %d\n",
-          err);
-    } else if (err != ap->kNoError) {
-      printf("Error in ProcessStream(): %d\n", err);
-      thread_data->error = true;
-      return false;
-    }
-    ap->gain_control()->stream_analog_level();
-  }
-
-  EventWrapper* event = EventWrapper::Create();
-  event->Wait(1);
-  delete event;
-  event = NULL;
-
-  return true;
-}
-
-/*TEST_F(ApmTest, Deadlock) {
-  const int num_threads = 16;
-  std::vector<ThreadWrapper*> threads(num_threads);
-  std::vector<ThreadData*> thread_data(num_threads);
-
-  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
-
-  for (int i = 0; i < num_threads; i++) {
-    thread_data[i] = new ThreadData(i, apm_);
-    threads[i] = ThreadWrapper::CreateThread(DeadlockProc,
-                                             thread_data[i],
-                                             kNormalPriority,
-                                             0);
-    ASSERT_TRUE(threads[i] != NULL);
-    unsigned int thread_id = 0;
-    threads[i]->Start(thread_id);
-  }
-
-  EventWrapper* event = EventWrapper::Create();
-  ASSERT_EQ(kEventTimeout, event->Wait(5000));
-  delete event;
-  event = NULL;
-
-  for (int i = 0; i < num_threads; i++) {
-    // This will return false if the thread has deadlocked.
-    ASSERT_TRUE(threads[i]->Stop());
-    ASSERT_FALSE(thread_data[i]->error);
-    delete threads[i];
-    threads[i] = NULL;
-    delete thread_data[i];
-    thread_data[i] = NULL;
-  }
-}*/
-
-TEST_F(ApmTest, StreamParameters) {
-  // No errors when the components are disabled.
-  EXPECT_EQ(apm_->kNoError,
-            apm_->ProcessStream(frame_));
-
-  // Missing agc level
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
-
-  // Missing delay
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
-
-  // Missing drift
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_drift_compensation(true));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-
-  // No stream parameters
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  EXPECT_EQ(apm_->kNoError,
-            apm_->AnalyzeReverseStream(revframe_));
-  EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            apm_->ProcessStream(frame_));
-
-  // All there
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-}
-
-TEST_F(ApmTest, Channels) {
-  // Testing number of invalid channels
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
-  // Testing number of valid channels
-  for (int i = 1; i < 3; i++) {
-    for (int j = 1; j < 3; j++) {
-      if (j > i) {
-        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
-      } else {
-        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
-        EXPECT_EQ(j, apm_->num_output_channels());
-      }
-    }
-    EXPECT_EQ(i, apm_->num_input_channels());
-    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
-    EXPECT_EQ(i, apm_->num_reverse_channels());
-  }
-}
-
-TEST_F(ApmTest, SampleRates) {
-  // Testing invalid sample rates
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
-  // Testing valid sample rates
-  int fs[] = {8000, 16000, 32000};
-  for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
-    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
-    EXPECT_EQ(fs[i], apm_->sample_rate_hz());
-  }
-}
-
-TEST_F(ApmTest, Process) {
-  GOOGLE_PROTOBUF_VERIFY_VERSION;
-  audio_processing_unittest::OutputData output_data;
-
-  if (global_read_output_data) {
-    ReadMessageLiteFromFile("output_data.pb", &output_data);
-
-  } else {
-    // We don't have a file; add the required tests to the protobuf.
-    int rev_ch[] = {1, 2};
-    int ch[] = {1, 2};
-    int fs[] = {8000, 16000, 32000};
-    for (size_t i = 0; i < sizeof(rev_ch) / sizeof(*rev_ch); i++) {
-      for (size_t j = 0; j < sizeof(ch) / sizeof(*ch); j++) {
-        for (size_t k = 0; k < sizeof(fs) / sizeof(*fs); k++) {
-          audio_processing_unittest::Test* test = output_data.add_test();
-          test->set_numreversechannels(rev_ch[i]);
-          test->set_numchannels(ch[j]);
-          test->set_samplerate(fs[k]);
-        }
-      }
-    }
-  }
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_drift_compensation(true));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_metrics(true));
-  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_analog_level_limits(0, 255));
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->high_pass_filter()->Enable(true));
-
-  //EXPECT_EQ(apm_->kNoError,
-  //          apm_->level_estimator()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->noise_suppression()->Enable(true));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->voice_detection()->Enable(true));
-
-  for (int i = 0; i < output_data.test_size(); i++) {
-    printf("Running test %d of %d...\n", i + 1, output_data.test_size());
-
-    audio_processing_unittest::Test* test = output_data.mutable_test(i);
-    const int num_samples = test->samplerate() / 100;
-    revframe_->_payloadDataLengthInSamples = num_samples;
-    revframe_->_audioChannel = test->numreversechannels();
-    revframe_->_frequencyInHz = test->samplerate();
-    frame_->_payloadDataLengthInSamples = num_samples;
-    frame_->_audioChannel = test->numchannels();
-    frame_->_frequencyInHz = test->samplerate();
-
-    EXPECT_EQ(apm_->kNoError, apm_->Initialize());
-    ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(test->samplerate()));
-    ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(frame_->_audioChannel,
-                                                     frame_->_audioChannel));
-    ASSERT_EQ(apm_->kNoError,
-        apm_->set_num_reverse_channels(revframe_->_audioChannel));
-
-
-    int has_echo_count = 0;
-    int has_voice_count = 0;
-    int is_saturated_count = 0;
-
-    while (1) {
-      WebRtc_Word16 temp_data[640];
-      int analog_level = 127;
-
-      // Read far-end frame
-      size_t read_count = fread(temp_data,
-                                sizeof(WebRtc_Word16),
-                                num_samples * 2,
-                                far_file_);
-      if (read_count != static_cast<size_t>(num_samples * 2)) {
-        // Check that the file really ended.
-        ASSERT_NE(0, feof(far_file_));
-        break; // This is expected.
-      }
-
-      if (revframe_->_audioChannel == 1) {
-        MixStereoToMono(temp_data, revframe_->_payloadData,
-            revframe_->_payloadDataLengthInSamples);
-      } else {
-        memcpy(revframe_->_payloadData,
-               &temp_data[0],
-               sizeof(WebRtc_Word16) * read_count);
-      }
-
-      EXPECT_EQ(apm_->kNoError,
-          apm_->AnalyzeReverseStream(revframe_));
-
-      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-      EXPECT_EQ(apm_->kNoError,
-          apm_->echo_cancellation()->set_stream_drift_samples(0));
-      EXPECT_EQ(apm_->kNoError,
-          apm_->gain_control()->set_stream_analog_level(analog_level));
-
-      // Read near-end frame
-      read_count = fread(temp_data,
-                         sizeof(WebRtc_Word16),
-                         num_samples * 2,
-                         near_file_);
-      if (read_count != static_cast<size_t>(num_samples * 2)) {
-        // Check that the file really ended.
-        ASSERT_NE(0, feof(near_file_));
-        break; // This is expected.
-      }
-
-      if (frame_->_audioChannel == 1) {
-        MixStereoToMono(temp_data, frame_->_payloadData, num_samples);
-      } else {
-        memcpy(frame_->_payloadData,
-               &temp_data[0],
-               sizeof(WebRtc_Word16) * read_count);
-      }
-
-      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-
-      if (apm_->echo_cancellation()->stream_has_echo()) {
-        has_echo_count++;
-      }
-
-      analog_level = apm_->gain_control()->stream_analog_level();
-      if (apm_->gain_control()->stream_is_saturated()) {
-        is_saturated_count++;
-      }
-      if (apm_->voice_detection()->stream_has_voice()) {
-        has_voice_count++;
-      }
-    }
-
-    //<-- Statistics -->
-    //LevelEstimator::Metrics far_metrics;
-    //LevelEstimator::Metrics near_metrics;
-    //EchoCancellation::Metrics echo_metrics;
-    //LevelEstimator::Metrics far_metrics_ref_;
-    //LevelEstimator::Metrics near_metrics_ref_;
-    //EchoCancellation::Metrics echo_metrics_ref_;
-    //EXPECT_EQ(apm_->kNoError,
-    //          apm_->echo_cancellation()->GetMetrics(&echo_metrics));
-    //EXPECT_EQ(apm_->kNoError,
-    //          apm_->level_estimator()->GetMetrics(&near_metrics,
-
-    // TODO(ajm): check echo metrics and output audio.
-    if (global_read_output_data) {
-      EXPECT_EQ(has_echo_count,
-                test->hasechocount());
-      EXPECT_EQ(has_voice_count,
-                test->hasvoicecount());
-      EXPECT_EQ(is_saturated_count,
-                test->issaturatedcount());
-    } else {
-      test->set_hasechocount(has_echo_count);
-      test->set_hasvoicecount(has_voice_count);
-      test->set_issaturatedcount(is_saturated_count);
-    }
-
-    rewind(far_file_);
-    rewind(near_file_);
-  }
-
-  if (!global_read_output_data) {
-    WriteMessageLiteToFile("output_data.pb", output_data);
-  }
-
-  google::protobuf::ShutdownProtobufLibrary();
-}
-
-TEST_F(ApmTest, EchoCancellation) {
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_drift_compensation(true));
-  EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_drift_compensation(false));
-  EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_device_sample_rate_hz(100000));
-
-  int rate[] = {16000, 44100, 48000};
-  for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
-    EXPECT_EQ(rate[i],
-        apm_->echo_cancellation()->device_sample_rate_hz());
-  }
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_suppression_level(
-          static_cast<EchoCancellation::SuppressionLevel>(-1)));
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_cancellation()->set_suppression_level(
-          static_cast<EchoCancellation::SuppressionLevel>(4)));
-
-  EchoCancellation::SuppressionLevel level[] = {
-    EchoCancellation::kLowSuppression,
-    EchoCancellation::kModerateSuppression,
-    EchoCancellation::kHighSuppression,
-  };
-  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->echo_cancellation()->set_suppression_level(level[i]));
-    EXPECT_EQ(level[i],
-        apm_->echo_cancellation()->suppression_level());
-  }
-
-  EchoCancellation::Metrics metrics;
-  EXPECT_EQ(apm_->kNotEnabledError,
-            apm_->echo_cancellation()->GetMetrics(&metrics));
-
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_metrics(true));
-  EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->enable_metrics(false));
-  EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
-
-  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
-  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
-  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
-}
-
-TEST_F(ApmTest, EchoControlMobile) {
-  // AECM won't use super-wideband.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
-  EXPECT_EQ(apm_->kBadSampleRateError, apm_->echo_control_mobile()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
-  // Turn AECM on (and AEC off)
-  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
-  EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_control_mobile()->set_routing_mode(
-      static_cast<EchoControlMobile::RoutingMode>(-1)));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->echo_control_mobile()->set_routing_mode(
-      static_cast<EchoControlMobile::RoutingMode>(5)));
-
-  // Toggle routing modes
-  EchoControlMobile::RoutingMode mode[] = {
-      EchoControlMobile::kQuietEarpieceOrHeadset,
-      EchoControlMobile::kEarpiece,
-      EchoControlMobile::kLoudEarpiece,
-      EchoControlMobile::kSpeakerphone,
-      EchoControlMobile::kLoudSpeakerphone,
-  };
-  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->echo_control_mobile()->set_routing_mode(mode[i]));
-    EXPECT_EQ(mode[i],
-        apm_->echo_control_mobile()->routing_mode());
-  }
-  // Turn comfort noise off/on
-  EXPECT_EQ(apm_->kNoError,
-      apm_->echo_control_mobile()->enable_comfort_noise(false));
-  EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
-  EXPECT_EQ(apm_->kNoError,
-      apm_->echo_control_mobile()->enable_comfort_noise(true));
-  EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
-  // Turn AECM off
-  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
-  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
-}
-
-TEST_F(ApmTest, GainControl) {
-  // Testing gain modes
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(-1)));
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(3)));
-
-  EXPECT_EQ(apm_->kNoError,
-      apm_->gain_control()->set_mode(
-      apm_->gain_control()->mode()));
-
-  GainControl::Mode mode[] = {
-    GainControl::kAdaptiveAnalog,
-    GainControl::kAdaptiveDigital,
-    GainControl::kFixedDigital
-  };
-  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->gain_control()->set_mode(mode[i]));
-    EXPECT_EQ(mode[i], apm_->gain_control()->mode());
-  }
-  // Testing invalid target levels
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_target_level_dbfs(-3));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_target_level_dbfs(-40));
-  // Testing valid target levels
-  EXPECT_EQ(apm_->kNoError,
-      apm_->gain_control()->set_target_level_dbfs(
-      apm_->gain_control()->target_level_dbfs()));
-
-  int level_dbfs[] = {0, 6, 31};
-  for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
-    EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
-  }
-
-  // Testing invalid compression gains
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_compression_gain_db(-1));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_compression_gain_db(100));
-
-  // Testing valid compression gains
-  EXPECT_EQ(apm_->kNoError,
-      apm_->gain_control()->set_compression_gain_db(
-      apm_->gain_control()->compression_gain_db()));
-
-  int gain_db[] = {0, 10, 90};
-  for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->gain_control()->set_compression_gain_db(gain_db[i]));
-    EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
-  }
-
-  // Testing limiter off/on
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
-  EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
-  EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
-
-  // Testing invalid level limits
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_analog_level_limits(-1, 512));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_analog_level_limits(100000, 512));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_analog_level_limits(512, -1));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_analog_level_limits(512, 100000));
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->gain_control()->set_analog_level_limits(512, 255));
-
-  // Testing valid level limits
-  EXPECT_EQ(apm_->kNoError,
-      apm_->gain_control()->set_analog_level_limits(
-      apm_->gain_control()->analog_level_minimum(),
-      apm_->gain_control()->analog_level_maximum()));
-
-  int min_level[] = {0, 255, 1024};
-  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
-    EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
-  }
-
-  int max_level[] = {0, 1024, 65535};
-  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
-    EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
-  }
-
-  // TODO(ajm): stream_is_saturated() and stream_analog_level()
-
-  // Turn AGC off
-  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
-  EXPECT_FALSE(apm_->gain_control()->is_enabled());
-}
-
-TEST_F(ApmTest, NoiseSuppression) {
-  // Tesing invalid suppression levels
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->noise_suppression()->set_level(
-          static_cast<NoiseSuppression::Level>(-1)));
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->noise_suppression()->set_level(
-          static_cast<NoiseSuppression::Level>(5)));
-
-  // Tesing valid suppression levels
-  NoiseSuppression::Level level[] = {
-    NoiseSuppression::kLow,
-    NoiseSuppression::kModerate,
-    NoiseSuppression::kHigh,
-    NoiseSuppression::kVeryHigh
-  };
-  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->noise_suppression()->set_level(level[i]));
-    EXPECT_EQ(level[i], apm_->noise_suppression()->level());
-  }
-
-  // Turing NS on/off
-  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
-  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
-  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
-  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
-}
-
-TEST_F(ApmTest, HighPassFilter) {
-  // Turing HP filter on/off
-  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
-  EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
-  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
-  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
-}
-
-TEST_F(ApmTest, LevelEstimator) {
-  // Turing Level estimator on/off
-  EXPECT_EQ(apm_->kUnsupportedComponentError,
-            apm_->level_estimator()->Enable(true));
-  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
-  EXPECT_EQ(apm_->kUnsupportedComponentError,
-            apm_->level_estimator()->Enable(false));
-  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
-}
-
-TEST_F(ApmTest, VoiceDetection) {
-  // Test external VAD
-  EXPECT_EQ(apm_->kNoError,
-            apm_->voice_detection()->set_stream_has_voice(true));
-  EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
-  EXPECT_EQ(apm_->kNoError,
-            apm_->voice_detection()->set_stream_has_voice(false));
-  EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
-
-  // Tesing invalid likelihoods
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->voice_detection()->set_likelihood(
-          static_cast<VoiceDetection::Likelihood>(-1)));
-
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->voice_detection()->set_likelihood(
-          static_cast<VoiceDetection::Likelihood>(5)));
-
-  // Tesing valid likelihoods
-  VoiceDetection::Likelihood likelihood[] = {
-      VoiceDetection::kVeryLowLikelihood,
-      VoiceDetection::kLowLikelihood,
-      VoiceDetection::kModerateLikelihood,
-      VoiceDetection::kHighLikelihood
-  };
-  for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
-    EXPECT_EQ(apm_->kNoError,
-              apm_->voice_detection()->set_likelihood(likelihood[i]));
-    EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
-  }
-
-  /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
-  // Tesing invalid frame sizes
-  EXPECT_EQ(apm_->kBadParameterError,
-      apm_->voice_detection()->set_frame_size_ms(12));
-
-  // Tesing valid frame sizes
-  for (int i = 10; i <= 30; i += 10) {
-    EXPECT_EQ(apm_->kNoError,
-        apm_->voice_detection()->set_frame_size_ms(i));
-    EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
-  }
-  */
-
-  // Turing VAD on/off
-  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
-  EXPECT_TRUE(apm_->voice_detection()->is_enabled());
-  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
-  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
-
-  // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
-}
-
-// Below are some ideas for tests from VPM.
-
-/*TEST_F(VideoProcessingModuleTest, GetVersionTest)
-{
-}
-
-TEST_F(VideoProcessingModuleTest, HandleNullBuffer)
-{
-}
-
-TEST_F(VideoProcessingModuleTest, HandleBadSize)
-{
-}
-
-TEST_F(VideoProcessingModuleTest, IdenticalResultsAfterReset)
-{
-}
-*/
-}  // namespace
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ApmEnvironment* env = new ApmEnvironment; // GTest takes ownership.
-  ::testing::AddGlobalTestEnvironment(env);
-
-  for (int i = 1; i < argc; i++) {
-    if (strcmp(argv[i], "--write_output_data") == 0) {
-      global_read_output_data = false;
-    }
-  }
-
-  return RUN_ALL_TESTS();
-}
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.cc b/src/modules/audio_processing/noise_suppression_impl.cc
similarity index 100%
rename from src/modules/audio_processing/main/source/noise_suppression_impl.cc
rename to src/modules/audio_processing/noise_suppression_impl.cc
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.h b/src/modules/audio_processing/noise_suppression_impl.h
similarity index 100%
rename from src/modules/audio_processing/main/source/noise_suppression_impl.h
rename to src/modules/audio_processing/noise_suppression_impl.h
diff --git a/src/modules/audio_processing/ns/Android.mk b/src/modules/audio_processing/ns/Android.mk
new file mode 100644
index 0000000..255f470
--- /dev/null
+++ b/src/modules/audio_processing/ns/Android.mk
@@ -0,0 +1,79 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+#############################
+# Build the non-neon library.
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_ns
+LOCAL_MODULE_TAGS := optional
+LOCAL_GENERATED_SOURCES :=
+LOCAL_SRC_FILES := \
+    noise_suppression_x.c \
+    nsx_core.c
+
+# Files for floating point.
+# noise_suppression.c ns_core.c 
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := $(MY_WEBRTC_COMMON_DEFS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../utility \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include \
+    $(LOCAL_PATH)/../../../system_wrappers/interface
+
+LOCAL_STATIC_LIBRARIES += libwebrtc_system_wrappers
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
+
+#############################
+# Build the neon library.
+ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_ns_neon
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := nsx_core_neon.c
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS) \
+    -mfpu=neon \
+    -mfloat-abi=softfp \
+    -flax-vector-conversions
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/interface \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include
+
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
+endif # ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
diff --git a/src/modules/audio_processing/ns/main/source/defines.h b/src/modules/audio_processing/ns/defines.h
similarity index 100%
rename from src/modules/audio_processing/ns/main/source/defines.h
rename to src/modules/audio_processing/ns/defines.h
diff --git a/src/modules/audio_processing/ns/main/interface/noise_suppression.h b/src/modules/audio_processing/ns/interface/noise_suppression.h
similarity index 85%
rename from src/modules/audio_processing/ns/main/interface/noise_suppression.h
rename to src/modules/audio_processing/ns/interface/noise_suppression.h
index b8983b0..907faf4 100644
--- a/src/modules/audio_processing/ns/main/interface/noise_suppression.h
+++ b/src/modules/audio_processing/ns/interface/noise_suppression.h
@@ -30,7 +30,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error (probably length is not sufficient)
  */
-int WebRtcNs_get_version(char *version, short length);
+int WebRtcNs_get_version(char* version, short length);
 
 
 /*
@@ -46,7 +46,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNs_Create(NsHandle **NS_inst);
+int WebRtcNs_Create(NsHandle** NS_inst);
 
 
 /*
@@ -59,7 +59,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNs_Free(NsHandle *NS_inst);
+int WebRtcNs_Free(NsHandle* NS_inst);
 
 
 /*
@@ -75,7 +75,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNs_Init(NsHandle *NS_inst, WebRtc_UWord32 fs);
+int WebRtcNs_Init(NsHandle* NS_inst, WebRtc_UWord32 fs);
 
 /*
  * This changes the aggressiveness of the noise suppression method.
@@ -90,7 +90,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNs_set_policy(NsHandle *NS_inst, int mode);
+int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
 
 
 /*
@@ -98,7 +98,7 @@
  * input and output signals should always be 10ms (80 or 160 samples).
  *
  * Input
- *      - NS_inst       : VAD Instance. Needs to be initiated before call.
+ *      - NS_inst       : NS Instance. Needs to be initiated before call.
  *      - spframe       : Pointer to speech frame buffer for L band
  *      - spframe_H     : Pointer to speech frame buffer for H band
  *      - fs            : sampling frequency
@@ -111,11 +111,11 @@
  * Return value         :  0 - OK
  *                        -1 - Error
  */
-int WebRtcNs_Process(NsHandle *NS_inst,
-                     short *spframe,
-                     short *spframe_H,
-                     short *outframe,
-                     short *outframe_H);
+int WebRtcNs_Process(NsHandle* NS_inst,
+                     short* spframe,
+                     short* spframe_H,
+                     short* outframe,
+                     short* outframe_H);
 
 #ifdef __cplusplus
 }
diff --git a/src/modules/audio_processing/ns/main/interface/noise_suppression_x.h b/src/modules/audio_processing/ns/interface/noise_suppression_x.h
similarity index 85%
rename from src/modules/audio_processing/ns/main/interface/noise_suppression_x.h
rename to src/modules/audio_processing/ns/interface/noise_suppression_x.h
index 35fea2f..14443fa 100644
--- a/src/modules/audio_processing/ns/main/interface/noise_suppression_x.h
+++ b/src/modules/audio_processing/ns/interface/noise_suppression_x.h
@@ -11,7 +11,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_INTERFACE_NOISE_SUPPRESSION_X_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_INTERFACE_NOISE_SUPPRESSION_X_H_
 
-#include "signal_processing_library.h"
+#include "typedefs.h"
 
 typedef struct NsxHandleT NsxHandle;
 
@@ -30,7 +30,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error (probably length is not sufficient)
  */
-int WebRtcNsx_get_version(char *version, short length);
+int WebRtcNsx_get_version(char* version, short length);
 
 
 /*
@@ -46,7 +46,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNsx_Create(NsxHandle **nsxInst);
+int WebRtcNsx_Create(NsxHandle** nsxInst);
 
 
 /*
@@ -59,7 +59,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNsx_Free(NsxHandle *nsxInst);
+int WebRtcNsx_Free(NsxHandle* nsxInst);
 
 
 /*
@@ -75,7 +75,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNsx_Init(NsxHandle *nsxInst, WebRtc_UWord32 fs);
+int WebRtcNsx_Init(NsxHandle* nsxInst, WebRtc_UWord32 fs);
 
 /*
  * This changes the aggressiveness of the noise suppression method.
@@ -90,7 +90,7 @@
  * Return value         :  0 - Ok
  *                        -1 - Error
  */
-int WebRtcNsx_set_policy(NsxHandle *nsxInst, int mode);
+int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode);
 
 /*
  * This functions does noise suppression for the inserted speech frame. The
@@ -110,11 +110,11 @@
  * Return value         :  0 - OK
  *                        -1 - Error
  */
-int WebRtcNsx_Process(NsxHandle *nsxInst,
-                      short *speechFrame,
-                      short *speechFrameHB,
-                      short *outFrame,
-                      short *outFrameHB);
+int WebRtcNsx_Process(NsxHandle* nsxInst,
+                      short* speechFrame,
+                      short* speechFrameHB,
+                      short* outFrame,
+                      short* outFrameHB);
 
 #ifdef __cplusplus
 }
diff --git a/src/modules/audio_processing/ns/main/source/Android.mk b/src/modules/audio_processing/ns/main/source/Android.mk
deleted file mode 100644
index 07ec98e..0000000
--- a/src/modules/audio_processing/ns/main/source/Android.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file is generated by gyp; do not edit. This means you!
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_MODULE := libwebrtc_ns
-LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := \
-    noise_suppression_x.c \
-    nsx_core.c 
-
-# floating point
-# noise_suppression.c ns_core.c 
-
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR'
-ifeq ($(TARGET_ARCH),arm) 
-MY_DEFS += \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-endif
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
-
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../../.. \
-    $(LOCAL_PATH)/../interface \
-    $(LOCAL_PATH)/../../../utility \
-    $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface 
-
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
-    libdl \
-    libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
-
-include external/stlport/libstlport.mk
-include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/ns/main/source/noise_suppression.c b/src/modules/audio_processing/ns/main/source/noise_suppression.c
deleted file mode 100644
index aed10b1..0000000
--- a/src/modules/audio_processing/ns/main/source/noise_suppression.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "noise_suppression.h"
-#include "ns_core.h"
-#include "defines.h"
-
-int WebRtcNs_get_version(char *versionStr, short length)
-{
-    const char version[] = "NS 2.2.0";
-    const short versionLen = (short)strlen(version) + 1; // +1 for null-termination
-
-    if (versionStr == NULL) {
-        return -1;
-    }
-
-    if (versionLen > length) {
-        return -1;
-    }
-
-    strncpy(versionStr, version, versionLen);
-
-    return 0;
-}
-
-int WebRtcNs_Create(NsHandle **NS_inst)
-{
-    *NS_inst = (NsHandle*) malloc(sizeof(NSinst_t));
-    if (*NS_inst!=NULL) {
-        (*(NSinst_t**)NS_inst)->initFlag=0;
-        return 0;
-    } else {
-        return -1;
-    }
-
-}
-
-int WebRtcNs_Free(NsHandle *NS_inst)
-{
-    free(NS_inst);
-    return 0;
-}
-
-
-int WebRtcNs_Init(NsHandle *NS_inst, WebRtc_UWord32 fs)
-{
-    return WebRtcNs_InitCore((NSinst_t*) NS_inst, fs);
-}
-
-int WebRtcNs_set_policy(NsHandle *NS_inst, int mode)
-{
-    return WebRtcNs_set_policy_core((NSinst_t*) NS_inst, mode);
-}
-
-
-int WebRtcNs_Process(NsHandle *NS_inst, short *spframe, short *spframe_H, short *outframe, short *outframe_H)
-{
-    return WebRtcNs_ProcessCore((NSinst_t*) NS_inst, spframe, spframe_H, outframe, outframe_H);
-}
diff --git a/src/modules/audio_processing/ns/main/source/noise_suppression_x.c b/src/modules/audio_processing/ns/main/source/noise_suppression_x.c
deleted file mode 100644
index f1ad730..0000000
--- a/src/modules/audio_processing/ns/main/source/noise_suppression_x.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "noise_suppression_x.h"
-#include "nsx_core.h"
-#include "nsx_defines.h"
-
-int WebRtcNsx_get_version(char *versionStr, short length)
-{
-    const char version[] = "NS\t3.1.0";
-    const short versionLen = (short)strlen(version) + 1; // +1 for null-termination
-
-    if (versionStr == NULL)
-    {
-        return -1;
-    }
-
-    if (versionLen > length)
-    {
-        return -1;
-    }
-
-    strncpy(versionStr, version, versionLen);
-
-    return 0;
-}
-
-int WebRtcNsx_Create(NsxHandle **nsxInst)
-{
-    *nsxInst = (NsxHandle*)malloc(sizeof(NsxInst_t));
-    if (*nsxInst != NULL)
-    {
-        (*(NsxInst_t**)nsxInst)->initFlag = 0;
-        return 0;
-    } else
-    {
-        return -1;
-    }
-
-}
-
-int WebRtcNsx_Free(NsxHandle *nsxInst)
-{
-    free(nsxInst);
-    return 0;
-}
-
-int WebRtcNsx_Init(NsxHandle *nsxInst, WebRtc_UWord32 fs)
-{
-    return WebRtcNsx_InitCore((NsxInst_t*)nsxInst, fs);
-}
-
-int WebRtcNsx_set_policy(NsxHandle *nsxInst, int mode)
-{
-    return WebRtcNsx_set_policy_core((NsxInst_t*)nsxInst, mode);
-}
-
-int WebRtcNsx_Process(NsxHandle *nsxInst, short *speechFrame, short *speechFrameHB,
-                      short *outFrame, short *outFrameHB)
-{
-    return WebRtcNsx_ProcessCore((NsxInst_t*)nsxInst, speechFrame, speechFrameHB, outFrame,
-                              outFrameHB);
-}
-
diff --git a/src/modules/audio_processing/ns/main/source/ns_core.c b/src/modules/audio_processing/ns/main/source/ns_core.c
deleted file mode 100644
index 10a1b83..0000000
--- a/src/modules/audio_processing/ns/main/source/ns_core.c
+++ /dev/null
@@ -1,1500 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <string.h>
-#include <math.h>
-//#include <stdio.h>
-#include <stdlib.h>
-#include "noise_suppression.h"
-#include "ns_core.h"
-#include "windows_private.h"
-#include "fft4g.h"
-#include "signal_processing_library.h"
-
-// Set Feature Extraction Parameters
-void WebRtcNs_set_feature_extraction_parameters(NSinst_t *inst)
-{
-    //bin size of histogram
-    inst->featureExtractionParams.binSizeLrt      = (float)0.1;
-    inst->featureExtractionParams.binSizeSpecFlat = (float)0.05;
-    inst->featureExtractionParams.binSizeSpecDiff = (float)0.1;
-
-    //range of histogram over which lrt threshold is computed
-    inst->featureExtractionParams.rangeAvgHistLrt = (float)1.0;
-
-    //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
-    // thresholds for prior model
-    inst->featureExtractionParams.factor1ModelPars = (float)1.20; //for lrt and spectral diff
-    inst->featureExtractionParams.factor2ModelPars = (float)0.9;  //for spectral_flatness:
-                                                     // used when noise is flatter than speech
-
-    //peak limit for spectral flatness (varies between 0 and 1)
-    inst->featureExtractionParams.thresPosSpecFlat = (float)0.6;
-
-    //limit on spacing of two highest peaks in histogram: spacing determined by bin size
-    inst->featureExtractionParams.limitPeakSpacingSpecFlat = 2
-            * inst->featureExtractionParams.binSizeSpecFlat;
-    inst->featureExtractionParams.limitPeakSpacingSpecDiff = 2
-            * inst->featureExtractionParams.binSizeSpecDiff;
-
-    //limit on relevance of second peak:
-    inst->featureExtractionParams.limitPeakWeightsSpecFlat = (float)0.5;
-    inst->featureExtractionParams.limitPeakWeightsSpecDiff = (float)0.5;
-
-    // fluctuation limit of lrt feature
-    inst->featureExtractionParams.thresFluctLrt = (float)0.05;
-
-    //limit on the max and min values for the feature thresholds
-    inst->featureExtractionParams.maxLrt = (float)1.0;
-    inst->featureExtractionParams.minLrt = (float)0.20;
-
-    inst->featureExtractionParams.maxSpecFlat = (float)0.95;
-    inst->featureExtractionParams.minSpecFlat = (float)0.10;
-
-    inst->featureExtractionParams.maxSpecDiff = (float)1.0;
-    inst->featureExtractionParams.minSpecDiff = (float)0.16;
-
-    //criteria of weight of histogram peak  to accept/reject feature
-    inst->featureExtractionParams.thresWeightSpecFlat = (int)(0.3
-            * (inst->modelUpdatePars[1])); //for spectral flatness
-    inst->featureExtractionParams.thresWeightSpecDiff = (int)(0.3
-            * (inst->modelUpdatePars[1])); //for spectral difference
-}
-
-// Initialize state
-int WebRtcNs_InitCore(NSinst_t *inst, WebRtc_UWord32 fs)
-{
-    int i;
-    //We only support 10ms frames
-
-    //check for valid pointer
-    if (inst == NULL)
-    {
-        return -1;
-    }
-
-    // Initialization of struct
-    if (fs == 8000 || fs == 16000 || fs == 32000)
-    {
-        inst->fs = fs;
-    }
-    else
-    {
-        return -1;
-    }
-    inst->windShift = 0;
-    if (fs == 8000)
-    {
-        // We only support 10ms frames
-        inst->blockLen = 80;
-        inst->blockLen10ms = 80;
-        inst->anaLen = 128;
-        inst->window = kBlocks80w128;
-        inst->outLen = 0;
-    }
-    else if (fs == 16000)
-    {
-        // We only support 10ms frames
-        inst->blockLen = 160;
-        inst->blockLen10ms = 160;
-        inst->anaLen = 256;
-        inst->window = kBlocks160w256;
-        inst->outLen = 0;
-    }
-    else if (fs==32000)
-    {
-        // We only support 10ms frames
-        inst->blockLen = 160;
-        inst->blockLen10ms = 160;
-        inst->anaLen = 256;
-        inst->window = kBlocks160w256;
-        inst->outLen = 0;
-    }
-    inst->magnLen = inst->anaLen / 2 + 1; // Number of frequency bins
-
-    // Initialize fft work arrays.
-    inst->ip[0] = 0; // Setting this triggers initialization.
-    memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
-    rdft(inst->anaLen, 1, inst->dataBuf, inst->ip, inst->wfft);
-
-    memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
-    memset(inst->syntBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
-
-    //for HB processing
-    memset(inst->dataBufHB, 0, sizeof(float) * ANAL_BLOCKL_MAX);
-
-    //for quantile noise estimation
-    memset(inst->quantile, 0, sizeof(float) * HALF_ANAL_BLOCKL);
-    for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++)
-    {
-        inst->lquantile[i] = (float)8.0;
-        inst->density[i] = (float)0.3;
-    }
-
-    for (i = 0; i < SIMULT; i++)
-    {
-        inst->counter[i] = (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
-    }
-
-    inst->updates = 0;
-
-    // Wiener filter initialization
-    for (i = 0; i < HALF_ANAL_BLOCKL; i++)
-    {
-        inst->smooth[i] = (float)1.0;
-    }
-
-    // Set the aggressiveness: default
-    inst->aggrMode = 0;
-
-    //initialize variables for new method
-    inst->priorSpeechProb = (float)0.5; //prior prob for speech/noise
-    for (i = 0; i < HALF_ANAL_BLOCKL; i++)
-    {
-        inst->magnPrev[i]      = (float)0.0; //previous mag spectrum
-        inst->noisePrev[i]     = (float)0.0; //previous noise-spectrum
-        inst->logLrtTimeAvg[i] = LRT_FEATURE_THR; //smooth LR ratio (same as threshold)
-        inst->magnAvgPause[i]  = (float)0.0; //conservative noise spectrum estimate
-        inst->speechProbHB[i]  = (float)0.0; //for estimation of HB in second pass
-        inst->initMagnEst[i]   = (float)0.0; //initial average mag spectrum
-    }
-
-    //feature quantities
-    inst->featureData[0] = SF_FEATURE_THR;  //spectral flatness (start on threshold)
-    inst->featureData[1] = (float)0.0;      //spectral entropy: not used in this version
-    inst->featureData[2] = (float)0.0;      //spectral variance: not used in this version
-    inst->featureData[3] = LRT_FEATURE_THR; //average lrt factor (start on threshold)
-    inst->featureData[4] = SF_FEATURE_THR;  //spectral template diff (start on threshold)
-    inst->featureData[5] = (float)0.0;      //normalization for spectral-diff
-    inst->featureData[6] = (float)0.0;      //window time-average of input magnitude spectrum
-
-    //histogram quantities: used to estimate/update thresholds for features
-    for (i = 0; i < HIST_PAR_EST; i++)
-    {
-        inst->histLrt[i] = 0;
-        inst->histSpecFlat[i] = 0;
-        inst->histSpecDiff[i] = 0;
-    }
-
-    inst->blockInd = -1; //frame counter
-    inst->priorModelPars[0] = LRT_FEATURE_THR; //default threshold for lrt feature
-    inst->priorModelPars[1] = (float)0.5;      //threshold for spectral flatness:
-                                               // determined on-line
-    inst->priorModelPars[2] = (float)1.0;      //sgn_map par for spectral measure:
-                                               // 1 for flatness measure
-    inst->priorModelPars[3] = (float)0.5;      //threshold for template-difference feature:
-                                               // determined on-line
-    inst->priorModelPars[4] = (float)1.0;      //default weighting parameter for lrt feature
-    inst->priorModelPars[5] = (float)0.0;      //default weighting parameter for
-                                               // spectral flatness feature
-    inst->priorModelPars[6] = (float)0.0;      //default weighting parameter for
-                                               // spectral difference feature
-
-    inst->modelUpdatePars[0] = 2;   //update flag for parameters:
-                                    // 0 no update, 1=update once, 2=update every window
-    inst->modelUpdatePars[1] = 500; //window for update
-    inst->modelUpdatePars[2] = 0;   //counter for update of conservative noise spectrum
-    //counter if the feature thresholds are updated during the sequence
-    inst->modelUpdatePars[3] = inst->modelUpdatePars[1];
-
-    inst->signalEnergy = 0.0;
-    inst->sumMagn = 0.0;
-    inst->whiteNoiseLevel = 0.0;
-    inst->pinkNoiseNumerator = 0.0;
-    inst->pinkNoiseExp = 0.0;
-
-    WebRtcNs_set_feature_extraction_parameters(inst); // Set feature configuration
-
-    //default mode
-    WebRtcNs_set_policy_core(inst, 0);
-
-
-    memset(inst->outBuf, 0, sizeof(float) * 3 * BLOCKL_MAX);
-
-    inst->initFlag = 1;
-    return 0;
-}
-
-int WebRtcNs_set_policy_core(NSinst_t *inst, int mode)
-{
-    // allow for modes:0,1,2,3
-    if (mode < 0 || mode > 3)
-    {
-        return (-1);
-    }
-
-    inst->aggrMode = mode;
-    if (mode == 0)
-    {
-        inst->overdrive = (float)1.0;
-        inst->denoiseBound = (float)0.5;
-        inst->gainmap = 0;
-    }
-    else if (mode == 1)
-    {
-        //inst->overdrive = (float)1.25;
-        inst->overdrive = (float)1.0;
-        inst->denoiseBound = (float)0.25;
-        inst->gainmap = 1;
-    }
-    else if (mode == 2)
-    {
-        //inst->overdrive = (float)1.25;
-        inst->overdrive = (float)1.1;
-        inst->denoiseBound = (float)0.125;
-        inst->gainmap = 1;
-    }
-    else if (mode == 3)
-    {
-        //inst->overdrive = (float)1.30;
-        inst->overdrive = (float)1.25;
-        inst->denoiseBound = (float)0.09;
-        inst->gainmap = 1;
-    }
-    return 0;
-}
-
-// Estimate noise
-void WebRtcNs_NoiseEstimation(NSinst_t *inst, float *magn, float *noise)
-{
-    int i, s, offset;
-    float lmagn[HALF_ANAL_BLOCKL], delta;
-
-    if (inst->updates < END_STARTUP_LONG)
-    {
-        inst->updates++;
-    }
-
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        lmagn[i] = (float)log(magn[i]);
-    }
-
-    // loop over simultaneous estimates
-    for (s = 0; s < SIMULT; s++)
-    {
-        offset = s * inst->magnLen;
-
-        // newquantest(...)
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // compute delta
-            if (inst->density[offset + i] > 1.0)
-            {
-                delta = FACTOR * (float)1.0 / inst->density[offset + i];
-            }
-            else
-            {
-                delta = FACTOR;
-            }
-
-            // update log quantile estimate
-            if (lmagn[i] > inst->lquantile[offset + i])
-            {
-                inst->lquantile[offset + i] += QUANTILE * delta
-                        / (float)(inst->counter[s] + 1);
-            }
-            else
-            {
-                inst->lquantile[offset + i] -= ((float)1.0 - QUANTILE) * delta
-                        / (float)(inst->counter[s] + 1);
-            }
-
-            // update density estimate
-            if (fabs(lmagn[i] - inst->lquantile[offset + i]) < WIDTH)
-            {
-                inst->density[offset + i] = ((float)inst->counter[s] * inst->density[offset
-                        + i] + (float)1.0 / ((float)2.0 * WIDTH)) / (float)(inst->counter[s]
-                        + 1);
-            }
-        } // end loop over magnitude spectrum
-
-        if (inst->counter[s] >= END_STARTUP_LONG)
-        {
-            inst->counter[s] = 0;
-            if (inst->updates >= END_STARTUP_LONG)
-            {
-                for (i = 0; i < inst->magnLen; i++)
-                {
-                    inst->quantile[i] = (float)exp(inst->lquantile[offset + i]);
-                }
-            }
-        }
-
-        inst->counter[s]++;
-    } // end loop over simultaneous estimates
-
-    // Sequentially update the noise during startup
-    if (inst->updates < END_STARTUP_LONG)
-    {
-        // Use the last "s" to get noise during startup that differ from zero.
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            inst->quantile[i] = (float)exp(inst->lquantile[offset + i]);
-        }
-    }
-
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        noise[i] = inst->quantile[i];
-    }
-}
-
-// Extract thresholds for feature parameters
-// histograms are computed over some window_size (given by inst->modelUpdatePars[1])
-// thresholds and weights are extracted every window
-// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
-// threshold and weights are returned in: inst->priorModelPars
-void WebRtcNs_FeatureParameterExtraction(NSinst_t *inst, int flag)
-{
-    int i, useFeatureSpecFlat, useFeatureSpecDiff, numHistLrt;
-    int maxPeak1, maxPeak2;
-    int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff, weightPeak2SpecDiff;
-
-    float binMid, featureSum;
-    float posPeak1SpecFlat, posPeak2SpecFlat, posPeak1SpecDiff, posPeak2SpecDiff;
-    float fluctLrt, avgHistLrt, avgSquareHistLrt, avgHistLrtCompl;
-
-    //3 features: lrt, flatness, difference
-    //lrt_feature = inst->featureData[3];
-    //flat_feature = inst->featureData[0];
-    //diff_feature = inst->featureData[4];
-
-    //update histograms
-    if (flag == 0)
-    {
-        // LRT
-        if ((inst->featureData[3] < HIST_PAR_EST * inst->featureExtractionParams.binSizeLrt)
-                && (inst->featureData[3] >= 0.0))
-        {
-            i = (int)(inst->featureData[3] / inst->featureExtractionParams.binSizeLrt);
-            inst->histLrt[i]++;
-        }
-        // Spectral flatness
-        if ((inst->featureData[0] < HIST_PAR_EST
-                    * inst->featureExtractionParams.binSizeSpecFlat)
-                && (inst->featureData[0] >= 0.0))
-        {
-            i = (int)(inst->featureData[0] / inst->featureExtractionParams.binSizeSpecFlat);
-            inst->histSpecFlat[i]++;
-        }
-        // Spectral difference
-        if ((inst->featureData[4] < HIST_PAR_EST
-                    * inst->featureExtractionParams.binSizeSpecDiff)
-                && (inst->featureData[4] >= 0.0))
-        {
-            i = (int)(inst->featureData[4] / inst->featureExtractionParams.binSizeSpecDiff);
-            inst->histSpecDiff[i]++;
-        }
-    }
-
-    // extract parameters for speech/noise probability
-    if (flag == 1)
-    {
-        //lrt feature: compute the average over inst->featureExtractionParams.rangeAvgHistLrt
-        avgHistLrt = 0.0;
-        avgHistLrtCompl = 0.0;
-        avgSquareHistLrt = 0.0;
-        numHistLrt = 0;
-        for (i = 0; i < HIST_PAR_EST; i++)
-        {
-            binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeLrt;
-            if (binMid <= inst->featureExtractionParams.rangeAvgHistLrt)
-            {
-                avgHistLrt += inst->histLrt[i] * binMid;
-                numHistLrt += inst->histLrt[i];
-            }
-            avgSquareHistLrt += inst->histLrt[i] * binMid * binMid;
-            avgHistLrtCompl += inst->histLrt[i] * binMid;
-        }
-        if (numHistLrt > 0)
-        {
-            avgHistLrt = avgHistLrt / ((float)numHistLrt);
-        }
-        avgHistLrtCompl = avgHistLrtCompl / ((float)inst->modelUpdatePars[1]);
-        avgSquareHistLrt = avgSquareHistLrt / ((float)inst->modelUpdatePars[1]);
-        fluctLrt = avgSquareHistLrt - avgHistLrt * avgHistLrtCompl;
-        // get threshold for lrt feature:
-        if (fluctLrt < inst->featureExtractionParams.thresFluctLrt)
-        {
-            //very low fluct, so likely noise
-            inst->priorModelPars[0] = inst->featureExtractionParams.maxLrt;
-        }
-        else
-        {
-            inst->priorModelPars[0] = inst->featureExtractionParams.factor1ModelPars
-                    * avgHistLrt;
-            // check if value is within min/max range
-            if (inst->priorModelPars[0] < inst->featureExtractionParams.minLrt)
-            {
-                inst->priorModelPars[0] = inst->featureExtractionParams.minLrt;
-            }
-            if (inst->priorModelPars[0] > inst->featureExtractionParams.maxLrt)
-            {
-                inst->priorModelPars[0] = inst->featureExtractionParams.maxLrt;
-            }
-        }
-        // done with lrt feature
-
-        //
-        // for spectral flatness and spectral difference: compute the main peaks of histogram
-        maxPeak1 = 0;
-        maxPeak2 = 0;
-        posPeak1SpecFlat = 0.0;
-        posPeak2SpecFlat = 0.0;
-        weightPeak1SpecFlat = 0;
-        weightPeak2SpecFlat = 0;
-
-        // peaks for flatness
-        for (i = 0; i < HIST_PAR_EST; i++)
-        {
-            binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecFlat;
-            if (inst->histSpecFlat[i] > maxPeak1)
-            {
-                // Found new "first" peak
-                maxPeak2 = maxPeak1;
-                weightPeak2SpecFlat = weightPeak1SpecFlat;
-                posPeak2SpecFlat = posPeak1SpecFlat;
-
-                maxPeak1 = inst->histSpecFlat[i];
-                weightPeak1SpecFlat = inst->histSpecFlat[i];
-                posPeak1SpecFlat = binMid;
-            }
-            else if (inst->histSpecFlat[i] > maxPeak2)
-            {
-                // Found new "second" peak
-                maxPeak2 = inst->histSpecFlat[i];
-                weightPeak2SpecFlat = inst->histSpecFlat[i];
-                posPeak2SpecFlat = binMid;
-            }
-        }
-
-        //compute two peaks for spectral difference
-        maxPeak1 = 0;
-        maxPeak2 = 0;
-        posPeak1SpecDiff = 0.0;
-        posPeak2SpecDiff = 0.0;
-        weightPeak1SpecDiff = 0;
-        weightPeak2SpecDiff = 0;
-        // peaks for spectral difference
-        for (i = 0; i < HIST_PAR_EST; i++)
-        {
-            binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecDiff;
-            if (inst->histSpecDiff[i] > maxPeak1)
-            {
-                // Found new "first" peak
-                maxPeak2 = maxPeak1;
-                weightPeak2SpecDiff = weightPeak1SpecDiff;
-                posPeak2SpecDiff = posPeak1SpecDiff;
-
-                maxPeak1 = inst->histSpecDiff[i];
-                weightPeak1SpecDiff = inst->histSpecDiff[i];
-                posPeak1SpecDiff = binMid;
-            }
-            else if (inst->histSpecDiff[i] > maxPeak2)
-            {
-                // Found new "second" peak
-                maxPeak2 = inst->histSpecDiff[i];
-                weightPeak2SpecDiff = inst->histSpecDiff[i];
-                posPeak2SpecDiff = binMid;
-            }
-        }
-
-        // for spectrum flatness feature
-        useFeatureSpecFlat = 1;
-        // merge the two peaks if they are close
-        if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat)
-                < inst->featureExtractionParams.limitPeakSpacingSpecFlat)
-                && (weightPeak2SpecFlat
-                        > inst->featureExtractionParams.limitPeakWeightsSpecFlat
-                                * weightPeak1SpecFlat))
-        {
-            weightPeak1SpecFlat += weightPeak2SpecFlat;
-            posPeak1SpecFlat = (float)0.5 * (posPeak1SpecFlat + posPeak2SpecFlat);
-        }
-        //reject if weight of peaks is not large enough, or peak value too small
-        if (weightPeak1SpecFlat < inst->featureExtractionParams.thresWeightSpecFlat
-                || posPeak1SpecFlat < inst->featureExtractionParams.thresPosSpecFlat)
-        {
-            useFeatureSpecFlat = 0;
-        }
-        // if selected, get the threshold
-        if (useFeatureSpecFlat == 1)
-        {
-            // compute the threshold
-            inst->priorModelPars[1] = inst->featureExtractionParams.factor2ModelPars
-                    * posPeak1SpecFlat;
-            //check if value is within min/max range
-            if (inst->priorModelPars[1] < inst->featureExtractionParams.minSpecFlat)
-            {
-                inst->priorModelPars[1] = inst->featureExtractionParams.minSpecFlat;
-            }
-            if (inst->priorModelPars[1] > inst->featureExtractionParams.maxSpecFlat)
-            {
-                inst->priorModelPars[1] = inst->featureExtractionParams.maxSpecFlat;
-            }
-        }
-        // done with flatness feature
-
-        // for template feature
-        useFeatureSpecDiff = 1;
-        // merge the two peaks if they are close
-        if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff)
-                < inst->featureExtractionParams.limitPeakSpacingSpecDiff)
-                && (weightPeak2SpecDiff
-                        > inst->featureExtractionParams.limitPeakWeightsSpecDiff
-                                * weightPeak1SpecDiff))
-        {
-            weightPeak1SpecDiff += weightPeak2SpecDiff;
-            posPeak1SpecDiff = (float)0.5 * (posPeak1SpecDiff + posPeak2SpecDiff);
-        }
-        // get the threshold value
-        inst->priorModelPars[3] = inst->featureExtractionParams.factor1ModelPars
-                * posPeak1SpecDiff;
-        //reject if weight of peaks is not large enough
-        if (weightPeak1SpecDiff < inst->featureExtractionParams.thresWeightSpecDiff)
-        {
-            useFeatureSpecDiff = 0;
-        }
-        //check if value is within min/max range
-        if (inst->priorModelPars[3] < inst->featureExtractionParams.minSpecDiff)
-        {
-            inst->priorModelPars[3] = inst->featureExtractionParams.minSpecDiff;
-        }
-        if (inst->priorModelPars[3] > inst->featureExtractionParams.maxSpecDiff)
-        {
-            inst->priorModelPars[3] = inst->featureExtractionParams.maxSpecDiff;
-        }
-        // done with spectral difference feature
-
-        // don't use template feature if fluctuation of lrt feature is very low:
-        //  most likely just noise state
-        if (fluctLrt < inst->featureExtractionParams.thresFluctLrt)
-        {
-            useFeatureSpecDiff = 0;
-        }
-
-        // select the weights between the features
-        // inst->priorModelPars[4] is weight for lrt: always selected
-        // inst->priorModelPars[5] is weight for spectral flatness
-        // inst->priorModelPars[6] is weight for spectral difference
-        featureSum = (float)(1 + useFeatureSpecFlat + useFeatureSpecDiff);
-        inst->priorModelPars[4] = (float)1.0 / featureSum;
-        inst->priorModelPars[5] = ((float)useFeatureSpecFlat) / featureSum;
-        inst->priorModelPars[6] = ((float)useFeatureSpecDiff) / featureSum;
-
-        // set hists to zero for next update
-        if (inst->modelUpdatePars[0] >= 1)
-        {
-            for (i = 0; i < HIST_PAR_EST; i++)
-            {
-                inst->histLrt[i] = 0;
-                inst->histSpecFlat[i] = 0;
-                inst->histSpecDiff[i] = 0;
-            }
-        }
-    } // end of flag == 1
-}
-
-// Compute spectral flatness on input spectrum
-// magnIn is the magnitude spectrum
-// spectral flatness is returned in inst->featureData[0]
-void WebRtcNs_ComputeSpectralFlatness(NSinst_t *inst, float *magnIn)
-{
-    int i;
-    int shiftLP = 1; //option to remove first bin(s) from spectral measures
-    float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
-
-    // comute spectral measures
-    // for flatness
-    avgSpectralFlatnessNum = 0.0;
-    avgSpectralFlatnessDen = inst->sumMagn;
-    for (i = 0; i < shiftLP; i++)
-    {
-        avgSpectralFlatnessDen -= magnIn[i];
-    }
-    // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
-    for (i = shiftLP; i < inst->magnLen; i++)
-    {
-        if (magnIn[i] > 0.0)
-        {
-            avgSpectralFlatnessNum += (float)log(magnIn[i]);
-        }
-        else
-        {
-            inst->featureData[0] -= SPECT_FL_TAVG * inst->featureData[0];
-            return;
-        }
-    }
-    //normalize
-    avgSpectralFlatnessDen = avgSpectralFlatnessDen / inst->magnLen;
-    avgSpectralFlatnessNum = avgSpectralFlatnessNum / inst->magnLen;
-
-    //ratio and inverse log: check for case of log(0)
-    spectralTmp = (float)exp(avgSpectralFlatnessNum) / avgSpectralFlatnessDen;
-
-    //time-avg update of spectral flatness feature
-    inst->featureData[0] += SPECT_FL_TAVG * (spectralTmp - inst->featureData[0]);
-    // done with flatness feature
-}
-
-// Compute the difference measure between input spectrum and a template/learned noise spectrum
-// magnIn is the input spectrum
-// the reference/template spectrum is inst->magnAvgPause[i]
-// returns (normalized) spectral difference in inst->featureData[4]
-void WebRtcNs_ComputeSpectralDifference(NSinst_t *inst, float *magnIn)
-{
-    // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
-    int i;
-    float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
-
-    avgPause = 0.0;
-    avgMagn = inst->sumMagn;
-    // compute average quantities
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        //conservative smooth noise spectrum from pause frames
-        avgPause += inst->magnAvgPause[i];
-    }
-    avgPause = avgPause / ((float)inst->magnLen);
-    avgMagn = avgMagn / ((float)inst->magnLen);
-
-    covMagnPause = 0.0;
-    varPause = 0.0;
-    varMagn = 0.0;
-    // compute variance and covariance quantities
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        covMagnPause += (magnIn[i] - avgMagn) * (inst->magnAvgPause[i] - avgPause);
-        varPause += (inst->magnAvgPause[i] - avgPause) * (inst->magnAvgPause[i] - avgPause);
-        varMagn += (magnIn[i] - avgMagn) * (magnIn[i] - avgMagn);
-    }
-    covMagnPause = covMagnPause / ((float)inst->magnLen);
-    varPause = varPause / ((float)inst->magnLen);
-    varMagn = varMagn / ((float)inst->magnLen);
-    // update of average magnitude spectrum
-    inst->featureData[6] += inst->signalEnergy;
-
-    avgDiffNormMagn = varMagn - (covMagnPause * covMagnPause) / (varPause + (float)0.0001);
-    // normalize and compute time-avg update of difference feature
-    avgDiffNormMagn = (float)(avgDiffNormMagn / (inst->featureData[5] + (float)0.0001));
-    inst->featureData[4] += SPECT_DIFF_TAVG * (avgDiffNormMagn - inst->featureData[4]);
-}
-
-// Compute speech/noise probability
-// speech/noise probability is returned in: probSpeechFinal
-//magn is the input magnitude spectrum
-//noise is the noise spectrum
-//snrLocPrior is the prior snr for each freq.
-//snr loc_post is the post snr for each freq.
-void WebRtcNs_SpeechNoiseProb(NSinst_t *inst, float *probSpeechFinal, float *snrLocPrior,
-                              float *snrLocPost)
-{
-    int i, sgnMap;
-    float invLrt, gainPrior, indPrior;
-    float logLrtTimeAvgKsum, besselTmp;
-    float indicator0, indicator1, indicator2;
-    float tmpFloat1, tmpFloat2;
-    float weightIndPrior0, weightIndPrior1, weightIndPrior2;
-    float threshPrior0, threshPrior1, threshPrior2;
-    float widthPrior, widthPrior0, widthPrior1, widthPrior2;
-
-    widthPrior0 = WIDTH_PR_MAP;
-    widthPrior1 = (float)2.0 * WIDTH_PR_MAP; //width for pause region:
-                                             // lower range, so increase width in tanh map
-    widthPrior2 = (float)2.0 * WIDTH_PR_MAP; //for spectral-difference measure
-
-    //threshold parameters for features
-    threshPrior0 = inst->priorModelPars[0];
-    threshPrior1 = inst->priorModelPars[1];
-    threshPrior2 = inst->priorModelPars[3];
-
-    //sign for flatness feature
-    sgnMap = (int)(inst->priorModelPars[2]);
-
-    //weight parameters for features
-    weightIndPrior0 = inst->priorModelPars[4];
-    weightIndPrior1 = inst->priorModelPars[5];
-    weightIndPrior2 = inst->priorModelPars[6];
-
-    // compute feature based on average LR factor
-    // this is the average over all frequencies of the smooth log lrt
-    logLrtTimeAvgKsum = 0.0;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        tmpFloat1 = (float)1.0 + (float)2.0 * snrLocPrior[i];
-        tmpFloat2 = (float)2.0 * snrLocPrior[i] / (tmpFloat1 + (float)0.0001);
-        besselTmp = (snrLocPost[i] + (float)1.0) * tmpFloat2;
-        inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - (float)log(tmpFloat1)
-                - inst->logLrtTimeAvg[i]);
-        logLrtTimeAvgKsum += inst->logLrtTimeAvg[i];
-    }
-    logLrtTimeAvgKsum = (float)logLrtTimeAvgKsum / (inst->magnLen);
-    inst->featureData[3] = logLrtTimeAvgKsum;
-    // done with computation of LR factor
-
-    //
-    //compute the indicator functions
-    //
-
-    // average lrt feature
-    widthPrior = widthPrior0;
-    //use larger width in tanh map for pause regions
-    if (logLrtTimeAvgKsum < threshPrior0)
-    {
-        widthPrior = widthPrior1;
-    }
-    // compute indicator function: sigmoid map
-    indicator0 = (float)0.5 * ((float)tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0))
-            + (float)1.0);
-
-    //spectral flatness feature
-    tmpFloat1 = inst->featureData[0];
-    widthPrior = widthPrior0;
-    //use larger width in tanh map for pause regions
-    if (sgnMap == 1 && (tmpFloat1 > threshPrior1))
-    {
-        widthPrior = widthPrior1;
-    }
-    if (sgnMap == -1 && (tmpFloat1 < threshPrior1))
-    {
-        widthPrior = widthPrior1;
-    }
-    // compute indicator function: sigmoid map
-    indicator1 = (float)0.5 * ((float)tanh(
-                                           (float)sgnMap * widthPrior * (threshPrior1
-                                                   - tmpFloat1)) + (float)1.0);
-
-    //for template spectrum-difference
-    tmpFloat1 = inst->featureData[4];
-    widthPrior = widthPrior0;
-    //use larger width in tanh map for pause regions
-    if (tmpFloat1 < threshPrior2)
-    {
-        widthPrior = widthPrior2;
-    }
-    // compute indicator function: sigmoid map
-    indicator2 = (float)0.5 * ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2))
-            + (float)1.0);
-
-    //combine the indicator function with the feature weights
-    indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2
-            * indicator2;
-    // done with computing indicator function
-
-    //compute the prior probability
-    inst->priorSpeechProb += PRIOR_UPDATE * (indPrior - inst->priorSpeechProb);
-    // make sure probabilities are within range: keep floor to 0.01
-    if (inst->priorSpeechProb > 1.0)
-    {
-        inst->priorSpeechProb = (float)1.0;
-    }
-    if (inst->priorSpeechProb < 0.01)
-    {
-        inst->priorSpeechProb = (float)0.01;
-    }
-
-    //final speech probability: combine prior model with LR factor:
-    gainPrior = ((float)1.0 - inst->priorSpeechProb) / (inst->priorSpeechProb + (float)0.0001);
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        invLrt = (float)exp(-inst->logLrtTimeAvg[i]);
-        invLrt = (float)gainPrior * invLrt;
-        probSpeechFinal[i] = (float)1.0 / ((float)1.0 + invLrt);
-    }
-}
-
-int WebRtcNs_ProcessCore(NSinst_t *inst,
-                         short *speechFrame,
-                         short *speechFrameHB,
-                         short *outFrame,
-                         short *outFrameHB)
-{
-    // main routine for noise reduction
-
-    int     flagHB = 0;
-    int     i;
-    const int kStartBand = 5; // Skip first frequency bins during estimation.
-    int     updateParsFlag;
-
-    float   energy1, energy2, gain, factor, factor1, factor2;
-    float   signalEnergy, sumMagn;
-    float   snrPrior, currentEstimateStsa;
-    float   tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
-    float   gammaNoiseTmp, gammaNoiseOld;
-    float   noiseUpdateTmp, fTmp, dTmp;
-    float   fin[BLOCKL_MAX], fout[BLOCKL_MAX];
-    float   winData[ANAL_BLOCKL_MAX];
-    float   magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
-    float   theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
-    float   snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
-    float   probSpeechFinal[HALF_ANAL_BLOCKL], previousEstimateStsa[HALF_ANAL_BLOCKL];
-    float   real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
-    // Variables during startup
-    float   sum_log_i = 0.0;
-    float   sum_log_i_square = 0.0;
-    float   sum_log_magn = 0.0;
-    float   sum_log_i_log_magn = 0.0;
-    float   parametric_noise = 0.0;
-    float   parametric_exp = 0.0;
-    float   parametric_num = 0.0;
-
-    // SWB variables
-    int     deltaBweHB = 1;
-    int     deltaGainHB = 1;
-    float   decayBweHB = 1.0;
-    float   gainMapParHB = 1.0;
-    float   gainTimeDomainHB = 1.0;
-    float   avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;
-
-    // Check that initiation has been done
-    if (inst->initFlag != 1)
-    {
-        return (-1);
-    }
-    // Check for valid pointers based on sampling rate
-    if (inst->fs == 32000)
-    {
-        if (speechFrameHB == NULL)
-        {
-            return -1;
-        }
-        flagHB = 1;
-        // range for averaging low band quantities for H band gain
-        deltaBweHB = (int)inst->magnLen / 4;
-        deltaGainHB = deltaBweHB;
-    }
-    //
-    updateParsFlag = inst->modelUpdatePars[0];
-    //
-
-    //for LB do all processing
-    // convert to float
-    for (i = 0; i < inst->blockLen10ms; i++)
-    {
-        fin[i] = (float)speechFrame[i];
-    }
-    // update analysis buffer for L band
-    memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
-           sizeof(float) * (inst->anaLen - inst->blockLen10ms));
-    memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, fin,
-           sizeof(float) * inst->blockLen10ms);
-
-    if (flagHB == 1)
-    {
-        // convert to float
-        for (i = 0; i < inst->blockLen10ms; i++)
-        {
-            fin[i] = (float)speechFrameHB[i];
-        }
-        // update analysis buffer for H band
-        memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
-               sizeof(float) * (inst->anaLen - inst->blockLen10ms));
-        memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, fin,
-               sizeof(float) * inst->blockLen10ms);
-    }
-
-    // check if processing needed
-    if (inst->outLen == 0)
-    {
-        // windowing
-        energy1 = 0.0;
-        for (i = 0; i < inst->anaLen; i++)
-        {
-            winData[i] = inst->window[i] * inst->dataBuf[i];
-            energy1 += winData[i] * winData[i];
-        }
-        if (energy1 == 0.0)
-        {
-            // synthesize the special case of zero input
-            // we want to avoid updating statistics in this case:
-            // Updating feature statistics when we have zeros only will cause thresholds to
-            // move towards zero signal situations. This in turn has the effect that once the
-            // signal is "turned on" (non-zero values) everything will be treated as speech
-            // and there is no noise suppression effect. Depending on the duration of the
-            // inactive signal it takes a considerable amount of time for the system to learn
-            // what is noise and what is speech.
-
-            // read out fully processed segment
-            for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++)
-            {
-                fout[i - inst->windShift] = inst->syntBuf[i];
-            }
-            // update synthesis buffer
-            memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
-                   sizeof(float) * (inst->anaLen - inst->blockLen));
-            memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
-                   sizeof(float) * inst->blockLen);
-
-            // out buffer
-            inst->outLen = inst->blockLen - inst->blockLen10ms;
-            if (inst->blockLen > inst->blockLen10ms)
-            {
-                for (i = 0; i < inst->outLen; i++)
-                {
-                    inst->outBuf[i] = fout[i + inst->blockLen10ms];
-                }
-            }
-            // convert to short
-            for (i = 0; i < inst->blockLen10ms; i++)
-            {
-                dTmp = fout[i];
-                if (dTmp < WEBRTC_SPL_WORD16_MIN)
-                {
-                    dTmp = WEBRTC_SPL_WORD16_MIN;
-                }
-                else if (dTmp > WEBRTC_SPL_WORD16_MAX)
-                {
-                    dTmp = WEBRTC_SPL_WORD16_MAX;
-                }
-                outFrame[i] = (short)dTmp;
-            }
-
-            // for time-domain gain of HB
-            if (flagHB == 1)
-            {
-                for (i = 0; i < inst->blockLen10ms; i++)
-                {
-                    dTmp = inst->dataBufHB[i];
-                    if (dTmp < WEBRTC_SPL_WORD16_MIN)
-                    {
-                        dTmp = WEBRTC_SPL_WORD16_MIN;
-                    }
-                    else if (dTmp > WEBRTC_SPL_WORD16_MAX)
-                    {
-                        dTmp = WEBRTC_SPL_WORD16_MAX;
-                    }
-                    outFrameHB[i] = (short)dTmp;
-                }
-            } // end of H band gain computation
-            //
-            return 0;
-        }
-
-        //
-        inst->blockInd++; // Update the block index only when we process a block.
-        // FFT
-        rdft(inst->anaLen, 1, winData, inst->ip, inst->wfft);
-
-        imag[0] = 0;
-        real[0] = winData[0];
-        magn[0] = (float)(fabs(real[0]) + 1.0f);
-        imag[inst->magnLen - 1] = 0;
-        real[inst->magnLen - 1] = winData[1];
-        magn[inst->magnLen - 1] = (float)(fabs(real[inst->magnLen - 1]) + 1.0f);
-        signalEnergy = (float)(real[0] * real[0]) + (float)(real[inst->magnLen - 1]
-                * real[inst->magnLen - 1]);
-        sumMagn = magn[0] + magn[inst->magnLen - 1];
-        if (inst->blockInd < END_STARTUP_SHORT)
-        {
-            inst->initMagnEst[0] += magn[0];
-            inst->initMagnEst[inst->magnLen - 1] += magn[inst->magnLen - 1];
-            tmpFloat2 = log((float)(inst->magnLen - 1));
-            sum_log_i = tmpFloat2;
-            sum_log_i_square = tmpFloat2 * tmpFloat2;
-            tmpFloat1 = log(magn[inst->magnLen - 1]);
-            sum_log_magn = tmpFloat1;
-            sum_log_i_log_magn = tmpFloat2 * tmpFloat1;
-        }
-        for (i = 1; i < inst->magnLen - 1; i++)
-        {
-            real[i] = winData[2 * i];
-            imag[i] = winData[2 * i + 1];
-            // magnitude spectrum
-            fTmp = real[i] * real[i];
-            fTmp += imag[i] * imag[i];
-            signalEnergy += fTmp;
-            magn[i] = ((float)sqrt(fTmp)) + 1.0f;
-            sumMagn += magn[i];
-            if (inst->blockInd < END_STARTUP_SHORT)
-            {
-                inst->initMagnEst[i] += magn[i];
-                if (i >= kStartBand)
-                {
-                    tmpFloat2 = log((float)i);
-                    sum_log_i += tmpFloat2;
-                    sum_log_i_square += tmpFloat2 * tmpFloat2;
-                    tmpFloat1 = log(magn[i]);
-                    sum_log_magn += tmpFloat1;
-                    sum_log_i_log_magn += tmpFloat2 * tmpFloat1;
-                }
-            }
-        }
-        signalEnergy = signalEnergy / ((float)inst->magnLen);
-        inst->signalEnergy = signalEnergy;
-        inst->sumMagn = sumMagn;
-
-        //compute spectral flatness on input spectrum
-        WebRtcNs_ComputeSpectralFlatness(inst, magn);
-        // quantile noise estimate
-        WebRtcNs_NoiseEstimation(inst, magn, noise);
-        //compute simplified noise model during startup
-        if (inst->blockInd < END_STARTUP_SHORT)
-        {
-            // Estimate White noise
-            inst->whiteNoiseLevel += sumMagn / ((float)inst->magnLen) * inst->overdrive;
-            // Estimate Pink noise parameters
-            tmpFloat1 = sum_log_i_square * ((float)(inst->magnLen - kStartBand));
-            tmpFloat1 -= (sum_log_i * sum_log_i);
-            tmpFloat2 = (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
-            tmpFloat3 = tmpFloat2 / tmpFloat1;
-            // Constrain the estimated spectrum to be positive
-            if (tmpFloat3 < 0.0f)
-            {
-                tmpFloat3 = 0.0f;
-            }
-            inst->pinkNoiseNumerator += tmpFloat3;
-            tmpFloat2 = (sum_log_i * sum_log_magn);
-            tmpFloat2 -= ((float)(inst->magnLen - kStartBand)) * sum_log_i_log_magn;
-            tmpFloat3 = tmpFloat2 / tmpFloat1;
-            // Constrain the pink noise power to be in the interval [0, 1];
-            if (tmpFloat3 < 0.0f)
-            {
-                tmpFloat3 = 0.0f;
-            }
-            if (tmpFloat3 > 1.0f)
-            {
-                tmpFloat3 = 1.0f;
-            }
-            inst->pinkNoiseExp += tmpFloat3;
-
-            // Calculate frequency independent parts of parametric noise estimate.
-            if (inst->pinkNoiseExp == 0.0f)
-            {
-                // Use white noise estimate
-                parametric_noise = inst->whiteNoiseLevel;
-            }
-            else
-            {
-                // Use pink noise estimate
-                parametric_num = exp(inst->pinkNoiseNumerator / (float)(inst->blockInd + 1));
-                parametric_num *= (float)(inst->blockInd + 1);
-                parametric_exp = inst->pinkNoiseExp / (float)(inst->blockInd + 1);
-                parametric_noise = parametric_num / pow((float)kStartBand, parametric_exp);
-            }
-            for (i = 0; i < inst->magnLen; i++)
-            {
-                // Estimate the background noise using the white and pink noise parameters
-                if ((inst->pinkNoiseExp > 0.0f) && (i >= kStartBand))
-                {
-                    // Use pink noise estimate
-                    parametric_noise = parametric_num / pow((float)i, parametric_exp);
-                }
-                theFilterTmp[i] = (inst->initMagnEst[i] - inst->overdrive * parametric_noise);
-                theFilterTmp[i] /= (inst->initMagnEst[i] + (float)0.0001);
-                // Weight quantile noise with modeled noise
-                noise[i] *= (inst->blockInd);
-                tmpFloat2 = parametric_noise * (END_STARTUP_SHORT - inst->blockInd);
-                noise[i] += (tmpFloat2 / (float)(inst->blockInd + 1));
-                noise[i] /= END_STARTUP_SHORT;
-            }
-        }
-        //compute average signal during END_STARTUP_LONG time:
-        // used to normalize spectral difference measure
-        if (inst->blockInd < END_STARTUP_LONG)
-        {
-            inst->featureData[5] *= inst->blockInd;
-            inst->featureData[5] += signalEnergy;
-            inst->featureData[5] /= (inst->blockInd + 1);
-        }
-
-#ifdef PROCESS_FLOW_0
-        if (inst->blockInd > END_STARTUP_LONG)
-        {
-            //option: average the quantile noise: for check with AEC2
-            for (i = 0; i < inst->magnLen; i++)
-            {
-                noise[i] = (float)0.6 * inst->noisePrev[i] + (float)0.4 * noise[i];
-            }
-            for (i = 0; i < inst->magnLen; i++)
-            {
-                // Wiener with over sub-substraction:
-                theFilter[i] = (magn[i] - inst->overdrive * noise[i]) / (magn[i] + (float)0.0001);
-            }
-        }
-#else
-        //start processing at frames == converged+1
-            //
-        // STEP 1: compute  prior and post snr based on quantile noise est
-        //
-
-        // compute DD estimate of prior SNR: needed for new method
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // post snr
-            snrLocPost[i] = (float)0.0;
-            if (magn[i] > noise[i])
-            {
-                snrLocPost[i] = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
-            }
-            // previous post snr
-            // previous estimate: based on previous frame with gain filter
-            previousEstimateStsa[i] = inst->magnPrev[i] / (inst->noisePrev[i] + (float)0.0001)
-                    * (inst->smooth[i]);
-            // DD estimate is sum of two terms: current estimate and previous estimate
-            // directed decision update of snrPrior
-            snrLocPrior[i] = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
-                    * snrLocPost[i];
-            // post and prior snr needed for step 2
-        } // end of loop over freqs
-#ifdef PROCESS_FLOW_1
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // gain filter
-            tmpFloat1 = inst->overdrive + snrLocPrior[i];
-            tmpFloat2 = (float)snrLocPrior[i] / tmpFloat1;
-            theFilter[i] = (float)tmpFloat2;
-        } // end of loop over freqs
-#endif
-        // done with step 1: dd computation of prior and post snr
-
-        //
-        //STEP 2: compute speech/noise likelihood
-        //
-#ifdef PROCESS_FLOW_2
-        // compute difference of input spectrum with learned/estimated noise spectrum
-        WebRtcNs_ComputeSpectralDifference(inst, magn);
-        // compute histograms for parameter decisions (thresholds and weights for features)
-        // parameters are extracted once every window time (=inst->modelUpdatePars[1])
-        if (updateParsFlag >= 1)
-        {
-            // counter update
-            inst->modelUpdatePars[3]--;
-            // update histogram
-            if (inst->modelUpdatePars[3] > 0)
-            {
-                WebRtcNs_FeatureParameterExtraction(inst, 0);
-            }
-            // compute model parameters
-            if (inst->modelUpdatePars[3] == 0)
-            {
-                WebRtcNs_FeatureParameterExtraction(inst, 1);
-                inst->modelUpdatePars[3] = inst->modelUpdatePars[1];
-                // if wish to update only once, set flag to zero
-                if (updateParsFlag == 1)
-                {
-                    inst->modelUpdatePars[0] = 0;
-                }
-                else
-                {
-                    // update every window:
-                    // get normalization for spectral difference for next window estimate
-                    inst->featureData[6] = inst->featureData[6]
-                            / ((float)inst->modelUpdatePars[1]);
-                    inst->featureData[5] = (float)0.5 * (inst->featureData[6]
-                            + inst->featureData[5]);
-                    inst->featureData[6] = (float)0.0;
-                }
-            }
-        }
-        // compute speech/noise probability
-        WebRtcNs_SpeechNoiseProb(inst, probSpeechFinal, snrLocPrior, snrLocPost);
-        // time-avg parameter for noise update
-        gammaNoiseTmp = NOISE_UPDATE;
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            probSpeech = probSpeechFinal[i];
-            probNonSpeech = (float)1.0 - probSpeech;
-            // temporary noise update:
-            // use it for speech frames if update value is less than previous
-            noiseUpdateTmp = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
-                    * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
-            //
-            // time-constant based on speech/noise state
-            gammaNoiseOld = gammaNoiseTmp;
-            gammaNoiseTmp = NOISE_UPDATE;
-            // increase gamma (i.e., less noise update) for frame likely to be speech
-            if (probSpeech > PROB_RANGE)
-            {
-                gammaNoiseTmp = SPEECH_UPDATE;
-            }
-            // conservative noise update
-            if (probSpeech < PROB_RANGE)
-            {
-                inst->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - inst->magnAvgPause[i]);
-            }
-            // noise update
-            if (gammaNoiseTmp == gammaNoiseOld)
-            {
-                noise[i] = noiseUpdateTmp;
-            }
-            else
-            {
-                noise[i] = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
-                        * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
-                // allow for noise update downwards:
-                //  if noise update decreases the noise, it is safe, so allow it to happen
-                if (noiseUpdateTmp < noise[i])
-                {
-                    noise[i] = noiseUpdateTmp;
-                }
-            }
-        } // end of freq loop
-        // done with step 2: noise update
-
-        //
-        // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
-        //
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // post and prior snr
-            currentEstimateStsa = (float)0.0;
-            if (magn[i] > noise[i])
-            {
-                currentEstimateStsa = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
-            }
-            // DD estimate is sume of two terms: current estimate and previous estimate
-            // directed decision update of snrPrior
-            snrPrior = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
-                    * currentEstimateStsa;
-            // gain filter
-            tmpFloat1 = inst->overdrive + snrPrior;
-            tmpFloat2 = (float)snrPrior / tmpFloat1;
-            theFilter[i] = (float)tmpFloat2;
-        } // end of loop over freqs
-        // done with step3
-#endif
-#endif
-
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // flooring bottom
-            if (theFilter[i] < inst->denoiseBound)
-            {
-                theFilter[i] = inst->denoiseBound;
-            }
-            // flooring top
-            if (theFilter[i] > (float)1.0)
-            {
-                theFilter[i] = 1.0;
-            }
-            if (inst->blockInd < END_STARTUP_SHORT)
-            {
-                // flooring bottom
-                if (theFilterTmp[i] < inst->denoiseBound)
-                {
-                    theFilterTmp[i] = inst->denoiseBound;
-                }
-                // flooring top
-                if (theFilterTmp[i] > (float)1.0)
-                {
-                    theFilterTmp[i] = 1.0;
-                }
-                // Weight the two suppression filters
-                theFilter[i] *= (inst->blockInd);
-                theFilterTmp[i] *= (END_STARTUP_SHORT - inst->blockInd);
-                theFilter[i] += theFilterTmp[i];
-                theFilter[i] /= (END_STARTUP_SHORT);
-            }
-            // smoothing
-#ifdef PROCESS_FLOW_0
-            inst->smooth[i] *= SMOOTH; // value set to 0.7 in define.h file
-            inst->smooth[i] += ((float)1.0 - SMOOTH) * theFilter[i];
-#else
-            inst->smooth[i] = theFilter[i];
-#endif
-            real[i] *= inst->smooth[i];
-            imag[i] *= inst->smooth[i];
-        }
-        // keep track of noise and magn spectrum for next frame
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            inst->noisePrev[i] = noise[i];
-            inst->magnPrev[i] = magn[i];
-        }
-        // back to time domain
-        winData[0] = real[0];
-        winData[1] = real[inst->magnLen - 1];
-        for (i = 1; i < inst->magnLen - 1; i++)
-        {
-            winData[2 * i] = real[i];
-            winData[2 * i + 1] = imag[i];
-        }
-        rdft(inst->anaLen, -1, winData, inst->ip, inst->wfft);
-
-        for (i = 0; i < inst->anaLen; i++)
-        {
-            real[i] = 2.0f * winData[i] / inst->anaLen; // fft scaling
-        }
-
-        //scale factor: only do it after END_STARTUP_LONG time
-        factor = (float)1.0;
-        if (inst->gainmap == 1 && inst->blockInd > END_STARTUP_LONG)
-        {
-            factor1 = (float)1.0;
-            factor2 = (float)1.0;
-
-            energy2 = 0.0;
-            for (i = 0; i < inst->anaLen;i++)
-            {
-                energy2 += (float)real[i] * (float)real[i];
-            }
-            gain = (float)sqrt(energy2 / (energy1 + (float)1.0));
-
-#ifdef PROCESS_FLOW_2
-            // scaling for new version
-            if (gain > B_LIM)
-            {
-                factor1 = (float)1.0 + (float)1.3 * (gain - B_LIM);
-                if (gain * factor1 > (float)1.0)
-                {
-                    factor1 = (float)1.0 / gain;
-                }
-            }
-            if (gain < B_LIM)
-            {
-                //don't reduce scale too much for pause regions:
-                // attenuation here should be controlled by flooring
-                if (gain <= inst->denoiseBound)
-                {
-                    gain = inst->denoiseBound;
-                }
-                factor2 = (float)1.0 - (float)0.3 * (B_LIM - gain);
-            }
-            //combine both scales with speech/noise prob:
-            // note prior (priorSpeechProb) is not frequency dependent
-            factor = inst->priorSpeechProb * factor1 + ((float)1.0 - inst->priorSpeechProb)
-                    * factor2;
-#else
-            if (gain > B_LIM)
-            {
-                factor = (float)1.0 + (float)1.3 * (gain - B_LIM);
-            }
-            else
-            {
-                factor = (float)1.0 + (float)2.0 * (gain - B_LIM);
-            }
-            if (gain * factor > (float)1.0)
-            {
-                factor = (float)1.0 / gain;
-            }
-#endif
-        } // out of inst->gainmap==1
-
-        // synthesis
-        for (i = 0; i < inst->anaLen; i++)
-        {
-            inst->syntBuf[i] += factor * inst->window[i] * (float)real[i];
-        }
-        // read out fully processed segment
-        for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++)
-        {
-            fout[i - inst->windShift] = inst->syntBuf[i];
-        }
-        // update synthesis buffer
-        memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
-               sizeof(float) * (inst->anaLen - inst->blockLen));
-        memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
-               sizeof(float) * inst->blockLen);
-
-        // out buffer
-        inst->outLen = inst->blockLen - inst->blockLen10ms;
-        if (inst->blockLen > inst->blockLen10ms)
-        {
-            for (i = 0; i < inst->outLen; i++)
-            {
-                inst->outBuf[i] = fout[i + inst->blockLen10ms];
-            }
-        }
-    } // end of if out.len==0
-    else
-    {
-        for (i = 0; i < inst->blockLen10ms; i++)
-        {
-            fout[i] = inst->outBuf[i];
-        }
-        memcpy(inst->outBuf, inst->outBuf + inst->blockLen10ms,
-               sizeof(float) * (inst->outLen - inst->blockLen10ms));
-        memset(inst->outBuf + inst->outLen - inst->blockLen10ms, 0,
-               sizeof(float) * inst->blockLen10ms);
-        inst->outLen -= inst->blockLen10ms;
-    }
-
-    // convert to short
-    for (i = 0; i < inst->blockLen10ms; i++)
-    {
-        dTmp = fout[i];
-        if (dTmp < WEBRTC_SPL_WORD16_MIN)
-        {
-            dTmp = WEBRTC_SPL_WORD16_MIN;
-        }
-        else if (dTmp > WEBRTC_SPL_WORD16_MAX)
-        {
-            dTmp = WEBRTC_SPL_WORD16_MAX;
-        }
-        outFrame[i] = (short)dTmp;
-    }
-
-    // for time-domain gain of HB
-    if (flagHB == 1)
-    {
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            inst->speechProbHB[i] = probSpeechFinal[i];
-        }
-        if (inst->blockInd > END_STARTUP_LONG)
-        {
-            // average speech prob from low band
-            // avg over second half (i.e., 4->8kHz) of freq. spectrum
-            avgProbSpeechHB = 0.0;
-            for (i = inst->magnLen - deltaBweHB - 1; i < inst->magnLen - 1; i++)
-            {
-                avgProbSpeechHB += inst->speechProbHB[i];
-            }
-            avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB);
-            // average filter gain from low band
-            // average over second half (i.e., 4->8kHz) of freq. spectrum
-            avgFilterGainHB = 0.0;
-            for (i = inst->magnLen - deltaGainHB - 1; i < inst->magnLen - 1; i++)
-            {
-                avgFilterGainHB += inst->smooth[i];
-            }
-            avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB));
-            avgProbSpeechHBTmp = (float)2.0 * avgProbSpeechHB - (float)1.0;
-            // gain based on speech prob:
-            gainModHB = (float)0.5 * ((float)1.0 + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
-            //combine gain with low band gain
-            gainTimeDomainHB = (float)0.5 * gainModHB + (float)0.5 * avgFilterGainHB;
-            if (avgProbSpeechHB >= (float)0.5)
-            {
-                gainTimeDomainHB = (float)0.25 * gainModHB + (float)0.75 * avgFilterGainHB;
-            }
-            gainTimeDomainHB = gainTimeDomainHB * decayBweHB;
-        } // end of converged
-        //make sure gain is within flooring range
-        // flooring bottom
-        if (gainTimeDomainHB < inst->denoiseBound)
-        {
-            gainTimeDomainHB = inst->denoiseBound;
-        }
-        // flooring top
-        if (gainTimeDomainHB > (float)1.0)
-        {
-            gainTimeDomainHB = 1.0;
-        }
-        //apply gain
-        for (i = 0; i < inst->blockLen10ms; i++)
-        {
-            dTmp = gainTimeDomainHB * inst->dataBufHB[i];
-            if (dTmp < WEBRTC_SPL_WORD16_MIN)
-            {
-                dTmp = WEBRTC_SPL_WORD16_MIN;
-            }
-            else if (dTmp > WEBRTC_SPL_WORD16_MAX)
-            {
-                dTmp = WEBRTC_SPL_WORD16_MAX;
-            }
-            outFrameHB[i] = (short)dTmp;
-        }
-    } // end of H band gain computation
-    //
-
-    return 0;
-}
diff --git a/src/modules/audio_processing/ns/main/source/ns_core.h b/src/modules/audio_processing/ns/main/source/ns_core.h
deleted file mode 100644
index f72e22b..0000000
--- a/src/modules/audio_processing/ns/main/source/ns_core.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
-
-#include "defines.h"
-
-typedef struct NSParaExtract_t_ {
-
-    //bin size of histogram
-    float binSizeLrt;
-    float binSizeSpecFlat;
-    float binSizeSpecDiff;
-    //range of histogram over which lrt threshold is computed
-    float rangeAvgHistLrt;
-    //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
-    //thresholds for prior model
-    float factor1ModelPars; //for lrt and spectral difference
-    float factor2ModelPars; //for spectral_flatness: used when noise is flatter than speech
-    //peak limit for spectral flatness (varies between 0 and 1)
-    float thresPosSpecFlat;
-    //limit on spacing of two highest peaks in histogram: spacing determined by bin size
-    float limitPeakSpacingSpecFlat;
-    float limitPeakSpacingSpecDiff;
-    //limit on relevance of second peak:
-    float limitPeakWeightsSpecFlat;
-    float limitPeakWeightsSpecDiff;
-    //limit on fluctuation of lrt feature
-    float thresFluctLrt;
-    //limit on the max and min values for the feature thresholds
-    float maxLrt;
-    float minLrt;
-    float maxSpecFlat;
-    float minSpecFlat;
-    float maxSpecDiff;
-    float minSpecDiff;
-    //criteria of weight of histogram peak  to accept/reject feature
-    int thresWeightSpecFlat;
-    int thresWeightSpecDiff;
-
-} NSParaExtract_t;
-
-typedef struct NSinst_t_ {
-
-    WebRtc_UWord32  fs;
-    int             blockLen;
-    int             blockLen10ms;
-    int             windShift;
-    int             outLen;
-    int             anaLen;
-    int             magnLen;
-    int             aggrMode;
-    const float*    window;
-    float           dataBuf[ANAL_BLOCKL_MAX];
-    float           syntBuf[ANAL_BLOCKL_MAX];
-    float           outBuf[3 * BLOCKL_MAX];
-
-    int             initFlag;
-    // parameters for quantile noise estimation
-    float           density[SIMULT * HALF_ANAL_BLOCKL];
-    float           lquantile[SIMULT * HALF_ANAL_BLOCKL];
-    float           quantile[HALF_ANAL_BLOCKL];
-    int             counter[SIMULT];
-    int             updates;
-    // parameters for Wiener filter
-    float           smooth[HALF_ANAL_BLOCKL];
-    float           overdrive;
-    float           denoiseBound;
-    int             gainmap;
-    // fft work arrays.
-    int             ip[IP_LENGTH];
-    float           wfft[W_LENGTH];
-
-    // parameters for new method: some not needed, will reduce/cleanup later
-    WebRtc_Word32   blockInd;                           //frame index counter
-    int             modelUpdatePars[4];                 //parameters for updating or estimating
-                                                        // thresholds/weights for prior model
-    float           priorModelPars[7];                  //parameters for prior model
-    float           noisePrev[HALF_ANAL_BLOCKL];        //noise spectrum from previous frame
-    float           magnPrev[HALF_ANAL_BLOCKL];         //magnitude spectrum of previous frame
-    float           logLrtTimeAvg[HALF_ANAL_BLOCKL];    //log lrt factor with time-smoothing
-    float           priorSpeechProb;                    //prior speech/noise probability
-    float           featureData[7];                     //data for features
-    float           magnAvgPause[HALF_ANAL_BLOCKL];     //conservative noise spectrum estimate
-    float           signalEnergy;                       //energy of magn
-    float           sumMagn;                            //sum of magn
-    float           whiteNoiseLevel;                    //initial noise estimate
-    float           initMagnEst[HALF_ANAL_BLOCKL];      //initial magnitude spectrum estimate
-    float           pinkNoiseNumerator;                 //pink noise parameter: numerator
-    float           pinkNoiseExp;                       //pink noise parameter: power of freq
-    NSParaExtract_t featureExtractionParams;            //parameters for feature extraction
-    //histograms for parameter estimation
-    int             histLrt[HIST_PAR_EST];
-    int             histSpecFlat[HIST_PAR_EST];
-    int             histSpecDiff[HIST_PAR_EST];
-    //quantities for high band estimate
-    float           speechProbHB[HALF_ANAL_BLOCKL];     //final speech/noise prob: prior + LRT
-    float           dataBufHB[ANAL_BLOCKL_MAX];         //buffering data for HB
-
-} NSinst_t;
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/****************************************************************************
- * WebRtcNs_InitCore(...)
- *
- * This function initializes a noise suppression instance
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - fs            : Sampling frequency
- *
- * Output:
- *      - inst          : Initialized instance
- *
- * Return value         :  0 - Ok
- *                        -1 - Error
- */
-int WebRtcNs_InitCore(NSinst_t *inst, WebRtc_UWord32 fs);
-
-/****************************************************************************
- * WebRtcNs_set_policy_core(...)
- *
- * This changes the aggressiveness of the noise suppression method.
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - mode          : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
- *
- * Output:
- *      - NS_inst      : Initialized instance
- *
- * Return value         :  0 - Ok
- *                        -1 - Error
- */
-int WebRtcNs_set_policy_core(NSinst_t *inst, int mode);
-
-/****************************************************************************
- * WebRtcNs_ProcessCore
- *
- * Do noise suppression.
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - inFrameLow    : Input speech frame for lower band
- *      - inFrameHigh   : Input speech frame for higher band
- *
- * Output:
- *      - inst          : Updated instance
- *      - outFrameLow   : Output speech frame for lower band
- *      - outFrameHigh  : Output speech frame for higher band
- *
- * Return value         :  0 - OK
- *                        -1 - Error
- */
-
-
-int WebRtcNs_ProcessCore(NSinst_t *inst,
-                         short *inFrameLow,
-                         short *inFrameHigh,
-                         short *outFrameLow,
-                         short *outFrameHigh);
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
diff --git a/src/modules/audio_processing/ns/main/source/nsx_core.c b/src/modules/audio_processing/ns/main/source/nsx_core.c
deleted file mode 100644
index 01d3e54..0000000
--- a/src/modules/audio_processing/ns/main/source/nsx_core.c
+++ /dev/null
@@ -1,2493 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "noise_suppression_x.h"
-
-#include <assert.h>
-#include <math.h>
-#include <string.h>
-#include <stdlib.h>
-
-#include "nsx_core.h"
-
-// Skip first frequency bins during estimation. (0 <= value < 64)
-static const int kStartBand = 5;
-
-// Rounding
-static const WebRtc_Word16 kRoundTable[16] = {0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
-        2048, 4096, 8192, 16384};
-
-// Constants to compensate for shifting signal log(2^shifts).
-static const WebRtc_Word16 kLogTable[9] = {0, 177, 355, 532, 710, 887, 1065, 1242, 1420};
-
-static const WebRtc_Word16 kCounterDiv[201] = {32767, 16384, 10923, 8192, 6554, 5461, 4681,
-        4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560,
-        1489, 1425, 1365, 1311, 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910,
-        886, 862, 840, 819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618,
-        607, 596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475, 468,
-        462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390, 386, 381, 377,
-        372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331, 328, 324, 321, 318, 315,
-        312, 309, 306, 303, 301, 298, 295, 293, 290, 287, 285, 282, 280, 278, 275, 273, 271,
-        269, 266, 264, 262, 260, 258, 256, 254, 252, 250, 248, 246, 245, 243, 241, 239, 237,
-        236, 234, 232, 231, 229, 228, 226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211,
-        210, 209, 207, 206, 205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191,
-        189, 188, 187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173,
-        172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163};
-
-static const WebRtc_Word16 kLogTableFrac[256] = {
-      0,   1,   3,   4,   6,   7,   9,  10,  11,  13,  14,  16,  17,  18,  20,  21,
-     22,  24,  25,  26,  28,  29,  30,  32,  33,  34,  36,  37,  38,  40,  41,  42,
-     44,  45,  46,  47,  49,  50,  51,  52,  54,  55,  56,  57,  59,  60,  61,  62,
-     63,  65,  66,  67,  68,  69,  71,  72,  73,  74,  75,  77,  78,  79,  80,  81,
-     82,  84,  85,  86,  87,  88,  89,  90,  92,  93,  94,  95,  96,  97,  98,  99,
-    100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117,
-    118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
-    134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
-    150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
-    165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 178,
-    179, 180, 181, 182, 183, 184, 185, 185, 186, 187, 188, 189, 190, 191, 192, 192,
-    193, 194, 195, 196, 197, 198, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206,
-    207, 208, 208, 209, 210, 211, 212, 212, 213, 214, 215, 216, 216, 217, 218, 219,
-    220, 220, 221, 222, 223, 224, 224, 225, 226, 227, 228, 228, 229, 230, 231, 231,
-    232, 233, 234, 234, 235, 236, 237, 238, 238, 239, 240, 241, 241, 242, 243, 244,
-    244, 245, 246, 247, 247, 248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255
-};
-
-static const WebRtc_Word16 kPowTableFrac[1024] = {
-    0,    1,    1,    2,    3,    3,    4,    5,
-    6,    6,    7,    8,    8,    9,   10,   10,
-   11,   12,   13,   13,   14,   15,   15,   16,
-   17,   17,   18,   19,   20,   20,   21,   22,
-   22,   23,   24,   25,   25,   26,   27,   27,
-   28,   29,   30,   30,   31,   32,   32,   33,
-   34,   35,   35,   36,   37,   37,   38,   39,
-   40,   40,   41,   42,   42,   43,   44,   45,
-   45,   46,   47,   48,   48,   49,   50,   50,
-   51,   52,   53,   53,   54,   55,   56,   56,
-   57,   58,   58,   59,   60,   61,   61,   62,
-   63,   64,   64,   65,   66,   67,   67,   68,
-   69,   69,   70,   71,   72,   72,   73,   74,
-   75,   75,   76,   77,   78,   78,   79,   80,
-   81,   81,   82,   83,   84,   84,   85,   86,
-   87,   87,   88,   89,   90,   90,   91,   92,
-   93,   93,   94,   95,   96,   96,   97,   98,
-   99,  100,  100,  101,  102,  103,  103,  104,
-  105,  106,  106,  107,  108,  109,  109,  110,
-  111,  112,  113,  113,  114,  115,  116,  116,
-  117,  118,  119,  119,  120,  121,  122,  123,
-  123,  124,  125,  126,  126,  127,  128,  129,
-  130,  130,  131,  132,  133,  133,  134,  135,
-  136,  137,  137,  138,  139,  140,  141,  141,
-  142,  143,  144,  144,  145,  146,  147,  148,
-  148,  149,  150,  151,  152,  152,  153,  154,
-  155,  156,  156,  157,  158,  159,  160,  160,
-  161,  162,  163,  164,  164,  165,  166,  167,
-  168,  168,  169,  170,  171,  172,  173,  173,
-  174,  175,  176,  177,  177,  178,  179,  180,
-  181,  181,  182,  183,  184,  185,  186,  186,
-  187,  188,  189,  190,  190,  191,  192,  193,
-  194,  195,  195,  196,  197,  198,  199,  200,
-  200,  201,  202,  203,  204,  205,  205,  206,
-  207,  208,  209,  210,  210,  211,  212,  213,
-  214,  215,  215,  216,  217,  218,  219,  220,
-  220,  221,  222,  223,  224,  225,  225,  226,
-  227,  228,  229,  230,  231,  231,  232,  233,
-  234,  235,  236,  237,  237,  238,  239,  240,
-  241,  242,  243,  243,  244,  245,  246,  247,
-  248,  249,  249,  250,  251,  252,  253,  254,
-  255,  255,  256,  257,  258,  259,  260,  261,
-  262,  262,  263,  264,  265,  266,  267,  268,
-  268,  269,  270,  271,  272,  273,  274,  275,
-  276,  276,  277,  278,  279,  280,  281,  282,
-  283,  283,  284,  285,  286,  287,  288,  289,
-  290,  291,  291,  292,  293,  294,  295,  296,
-  297,  298,  299,  299,  300,  301,  302,  303,
-  304,  305,  306,  307,  308,  308,  309,  310,
-  311,  312,  313,  314,  315,  316,  317,  318,
-  318,  319,  320,  321,  322,  323,  324,  325,
-  326,  327,  328,  328,  329,  330,  331,  332,
-  333,  334,  335,  336,  337,  338,  339,  339,
-  340,  341,  342,  343,  344,  345,  346,  347,
-  348,  349,  350,  351,  352,  352,  353,  354,
-  355,  356,  357,  358,  359,  360,  361,  362,
-  363,  364,  365,  366,  367,  367,  368,  369,
-  370,  371,  372,  373,  374,  375,  376,  377,
-  378,  379,  380,  381,  382,  383,  384,  385,
-  385,  386,  387,  388,  389,  390,  391,  392,
-  393,  394,  395,  396,  397,  398,  399,  400,
-  401,  402,  403,  404,  405,  406,  407,  408,
-  409,  410,  410,  411,  412,  413,  414,  415,
-  416,  417,  418,  419,  420,  421,  422,  423,
-  424,  425,  426,  427,  428,  429,  430,  431,
-  432,  433,  434,  435,  436,  437,  438,  439,
-  440,  441,  442,  443,  444,  445,  446,  447,
-  448,  449,  450,  451,  452,  453,  454,  455,
-  456,  457,  458,  459,  460,  461,  462,  463,
-  464,  465,  466,  467,  468,  469,  470,  471,
-  472,  473,  474,  475,  476,  477,  478,  479,
-  480,  481,  482,  483,  484,  485,  486,  487,
-  488,  489,  490,  491,  492,  493,  494,  495,
-  496,  498,  499,  500,  501,  502,  503,  504,
-  505,  506,  507,  508,  509,  510,  511,  512,
-  513,  514,  515,  516,  517,  518,  519,  520,
-  521,  522,  523,  525,  526,  527,  528,  529,
-  530,  531,  532,  533,  534,  535,  536,  537,
-  538,  539,  540,  541,  542,  544,  545,  546,
-  547,  548,  549,  550,  551,  552,  553,  554,
-  555,  556,  557,  558,  560,  561,  562,  563,
-  564,  565,  566,  567,  568,  569,  570,  571,
-  572,  574,  575,  576,  577,  578,  579,  580,
-  581,  582,  583,  584,  585,  587,  588,  589,
-  590,  591,  592,  593,  594,  595,  596,  597,
-  599,  600,  601,  602,  603,  604,  605,  606,
-  607,  608,  610,  611,  612,  613,  614,  615,
-  616,  617,  618,  620,  621,  622,  623,  624,
-  625,  626,  627,  628,  630,  631,  632,  633,
-  634,  635,  636,  637,  639,  640,  641,  642,
-  643,  644,  645,  646,  648,  649,  650,  651,
-  652,  653,  654,  656,  657,  658,  659,  660,
-  661,  662,  664,  665,  666,  667,  668,  669,
-  670,  672,  673,  674,  675,  676,  677,  678,
-  680,  681,  682,  683,  684,  685,  687,  688,
-  689,  690,  691,  692,  693,  695,  696,  697,
-  698,  699,  700,  702,  703,  704,  705,  706,
-  708,  709,  710,  711,  712,  713,  715,  716,
-  717,  718,  719,  720,  722,  723,  724,  725,
-  726,  728,  729,  730,  731,  732,  733,  735,
-  736,  737,  738,  739,  741,  742,  743,  744,
-  745,  747,  748,  749,  750,  751,  753,  754,
-  755,  756,  757,  759,  760,  761,  762,  763,
-  765,  766,  767,  768,  770,  771,  772,  773,
-  774,  776,  777,  778,  779,  780,  782,  783,
-  784,  785,  787,  788,  789,  790,  792,  793,
-  794,  795,  796,  798,  799,  800,  801,  803,
-  804,  805,  806,  808,  809,  810,  811,  813,
-  814,  815,  816,  818,  819,  820,  821,  823,
-  824,  825,  826,  828,  829,  830,  831,  833,
-  834,  835,  836,  838,  839,  840,  841,  843,
-  844,  845,  846,  848,  849,  850,  851,  853,
-  854,  855,  857,  858,  859,  860,  862,  863,
-  864,  866,  867,  868,  869,  871,  872,  873,
-  874,  876,  877,  878,  880,  881,  882,  883,
-  885,  886,  887,  889,  890,  891,  893,  894,
-  895,  896,  898,  899,  900,  902,  903,  904,
-  906,  907,  908,  909,  911,  912,  913,  915,
-  916,  917,  919,  920,  921,  923,  924,  925,
-  927,  928,  929,  931,  932,  933,  935,  936,
-  937,  938,  940,  941,  942,  944,  945,  946,
-  948,  949,  950,  952,  953,  955,  956,  957,
-  959,  960,  961,  963,  964,  965,  967,  968,
-  969,  971,  972,  973,  975,  976,  977,  979,
-  980,  981,  983,  984,  986,  987,  988,  990,
-  991,  992,  994,  995,  996,  998,  999, 1001,
- 1002, 1003, 1005, 1006, 1007, 1009, 1010, 1012,
- 1013, 1014, 1016, 1017, 1018, 1020, 1021, 1023
-};
-
-static const WebRtc_Word16 kIndicatorTable[17] = {0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
-        7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187};
-
-// hybrib Hanning & flat window
-static const WebRtc_Word16 kBlocks80w128x[128] = {
-        0,    536,   1072,   1606,   2139,   2669,   3196,   3720,   4240,   4756,   5266,
-     5771,   6270,   6762,   7246,   7723,   8192,   8652,   9102,   9543,   9974,  10394,
-    10803,  11200,  11585,  11958,  12318,  12665,  12998,  13318,  13623,  13913,  14189,
-    14449,  14694,  14924,  15137,  15334,  15515,  15679,  15826,  15956,  16069,  16165,
-    16244,  16305,  16349,  16375,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
-    16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
-    16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
-    16384,  16384,  16384,  16384,  16375,  16349,  16305,  16244,  16165,  16069,  15956,
-    15826,  15679,  15515,  15334,  15137,  14924,  14694,  14449,  14189,  13913,  13623,
-    13318,  12998,  12665,  12318,  11958,  11585,  11200,  10803,  10394,   9974,   9543,
-     9102,   8652,   8192,   7723,   7246,   6762,   6270,   5771,   5266,   4756,   4240,
-     3720,   3196,   2669,   2139,   1606,   1072,    536
-};
-
-// hybrib Hanning & flat window
-static const WebRtc_Word16 kBlocks160w256x[256] = {
-    0,   268,   536,   804,  1072,  1339,  1606,  1872,
- 2139,  2404,  2669,  2933,  3196,  3459,  3720,  3981,
- 4240,  4499,  4756,  5012,  5266,  5520,  5771,  6021,
- 6270,  6517,  6762,  7005,  7246,  7486,  7723,  7959,
- 8192,  8423,  8652,  8878,  9102,  9324,  9543,  9760,
- 9974, 10185, 10394, 10600, 10803, 11003, 11200, 11394,
-11585, 11773, 11958, 12140, 12318, 12493, 12665, 12833,
-12998, 13160, 13318, 13472, 13623, 13770, 13913, 14053,
-14189, 14321, 14449, 14574, 14694, 14811, 14924, 15032,
-15137, 15237, 15334, 15426, 15515, 15599, 15679, 15754,
-15826, 15893, 15956, 16015, 16069, 16119, 16165, 16207,
-16244, 16277, 16305, 16329, 16349, 16364, 16375, 16382,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
-16384, 16382, 16375, 16364, 16349, 16329, 16305, 16277,
-16244, 16207, 16165, 16119, 16069, 16015, 15956, 15893,
-15826, 15754, 15679, 15599, 15515, 15426, 15334, 15237,
-15137, 15032, 14924, 14811, 14694, 14574, 14449, 14321,
-14189, 14053, 13913, 13770, 13623, 13472, 13318, 13160,
-12998, 12833, 12665, 12493, 12318, 12140, 11958, 11773,
-11585, 11394, 11200, 11003, 10803, 10600, 10394, 10185,
- 9974,  9760,  9543,  9324,  9102,  8878,  8652,  8423,
- 8192,  7959,  7723,  7486,  7246,  7005,  6762,  6517,
- 6270,  6021,  5771,  5520,  5266,  5012,  4756,  4499,
- 4240,  3981,  3720,  3459,  3196,  2933,  2669,  2404,
- 2139,  1872,  1606,  1339,  1072,   804,   536,   268
-};
-
-// Gain factor table: Input value in Q8 and output value in Q13
-static const WebRtc_Word16 kFactor1Table[257] = {
-        8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8233, 8274, 8315, 8355, 8396, 8436, 8475, 8515, 8554, 8592, 8631, 8669,
-        8707, 8745, 8783, 8820, 8857, 8894, 8931, 8967, 9003, 9039, 9075, 9111, 9146, 9181,
-        9216, 9251, 9286, 9320, 9354, 9388, 9422, 9456, 9489, 9523, 9556, 9589, 9622, 9655,
-        9687, 9719, 9752, 9784, 9816, 9848, 9879, 9911, 9942, 9973, 10004, 10035, 10066,
-        10097, 10128, 10158, 10188, 10218, 10249, 10279, 10308, 10338, 10368, 10397, 10426,
-        10456, 10485, 10514, 10543, 10572, 10600, 10629, 10657, 10686, 10714, 10742, 10770,
-        10798, 10826, 10854, 10882, 10847, 10810, 10774, 10737, 10701, 10666, 10631, 10596,
-        10562, 10527, 10494, 10460, 10427, 10394, 10362, 10329, 10297, 10266, 10235, 10203,
-        10173, 10142, 10112, 10082, 10052, 10023, 9994, 9965, 9936, 9908, 9879, 9851, 9824,
-        9796, 9769, 9742, 9715, 9689, 9662, 9636, 9610, 9584, 9559, 9534, 9508, 9484, 9459,
-        9434, 9410, 9386, 9362, 9338, 9314, 9291, 9268, 9245, 9222, 9199, 9176, 9154, 9132,
-        9110, 9088, 9066, 9044, 9023, 9002, 8980, 8959, 8939, 8918, 8897, 8877, 8857, 8836,
-        8816, 8796, 8777, 8757, 8738, 8718, 8699, 8680, 8661, 8642, 8623, 8605, 8586, 8568,
-        8550, 8532, 8514, 8496, 8478, 8460, 8443, 8425, 8408, 8391, 8373, 8356, 8339, 8323,
-        8306, 8289, 8273, 8256, 8240, 8224, 8208, 8192
-};
-
-// Gain factor table: Input value in Q8 and output value in Q13
-static const WebRtc_Word16 kFactor2Aggressiveness1[257] = {
-        7577, 7577, 7577, 7577, 7577, 7577,
-        7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7596, 7614, 7632,
-        7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
-        7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
-        8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
-        8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
-};
-
-// Gain factor table: Input value in Q8 and output value in Q13
-static const WebRtc_Word16 kFactor2Aggressiveness2[257] = {
-        7270, 7270, 7270, 7270, 7270, 7306,
-        7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
-        7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
-        7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
-        8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
-        8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
-};
-
-// Gain factor table: Input value in Q8 and output value in Q13
-static const WebRtc_Word16 kFactor2Aggressiveness3[257] = {
-        7184, 7184, 7184, 7229, 7270, 7306,
-        7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
-        7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
-        7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
-        8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
-        8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
-        8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
-};
-
-// sum of log2(i) from table index to inst->anaLen2 in Q5
-// Note that the first table value is invalid, since log2(0) = -infinity
-static const WebRtc_Word16 kSumLogIndex[66] = {
-        0,  22917,  22917,  22885,  22834,  22770,  22696,  22613,
-    22524,  22428,  22326,  22220,  22109,  21994,  21876,  21754,
-    21629,  21501,  21370,  21237,  21101,  20963,  20822,  20679,
-    20535,  20388,  20239,  20089,  19937,  19783,  19628,  19470,
-    19312,  19152,  18991,  18828,  18664,  18498,  18331,  18164,
-    17994,  17824,  17653,  17480,  17306,  17132,  16956,  16779,
-    16602,  16423,  16243,  16063,  15881,  15699,  15515,  15331,
-    15146,  14960,  14774,  14586,  14398,  14209,  14019,  13829,
-    13637,  13445
-};
-
-// sum of log2(i)^2 from table index to inst->anaLen2 in Q2
-// Note that the first table value is invalid, since log2(0) = -infinity
-static const WebRtc_Word16 kSumSquareLogIndex[66] = {
-        0,  16959,  16959,  16955,  16945,  16929,  16908,  16881,
-    16850,  16814,  16773,  16729,  16681,  16630,  16575,  16517,
-    16456,  16392,  16325,  16256,  16184,  16109,  16032,  15952,
-    15870,  15786,  15700,  15612,  15521,  15429,  15334,  15238,
-    15140,  15040,  14938,  14834,  14729,  14622,  14514,  14404,
-    14292,  14179,  14064,  13947,  13830,  13710,  13590,  13468,
-    13344,  13220,  13094,  12966,  12837,  12707,  12576,  12444,
-    12310,  12175,  12039,  11902,  11763,  11624,  11483,  11341,
-    11198,  11054
-};
-
-// log2(table index) in Q12
-// Note that the first table value is invalid, since log2(0) = -infinity
-static const WebRtc_Word16 kLogIndex[129] = {
-        0,      0,   4096,   6492,   8192,   9511,  10588,  11499,
-    12288,  12984,  13607,  14170,  14684,  15157,  15595,  16003,
-    16384,  16742,  17080,  17400,  17703,  17991,  18266,  18529,
-    18780,  19021,  19253,  19476,  19691,  19898,  20099,  20292,
-    20480,  20662,  20838,  21010,  21176,  21338,  21496,  21649,
-    21799,  21945,  22087,  22226,  22362,  22495,  22625,  22752,
-    22876,  22998,  23117,  23234,  23349,  23462,  23572,  23680,
-    23787,  23892,  23994,  24095,  24195,  24292,  24388,  24483,
-    24576,  24668,  24758,  24847,  24934,  25021,  25106,  25189,
-    25272,  25354,  25434,  25513,  25592,  25669,  25745,  25820,
-    25895,  25968,  26041,  26112,  26183,  26253,  26322,  26390,
-    26458,  26525,  26591,  26656,  26721,  26784,  26848,  26910,
-    26972,  27033,  27094,  27154,  27213,  27272,  27330,  27388,
-    27445,  27502,  27558,  27613,  27668,  27722,  27776,  27830,
-    27883,  27935,  27988,  28039,  28090,  28141,  28191,  28241,
-    28291,  28340,  28388,  28437,  28484,  28532,  28579,  28626,
-    28672
-};
-
-// determinant of estimation matrix in Q0 corresponding to the log2 tables above
-// Note that the first table value is invalid, since log2(0) = -infinity
-static const WebRtc_Word16 kDeterminantEstMatrix[66] = {
-        0,  29814,  25574,  22640,  20351,  18469,  16873,  15491,
-    14277,  13199,  12233,  11362,  10571,   9851,   9192,   8587,
-     8030,   7515,   7038,   6596,   6186,   5804,   5448,   5115,
-     4805,   4514,   4242,   3988,   3749,   3524,   3314,   3116,
-     2930,   2755,   2590,   2435,   2289,   2152,   2022,   1900,
-     1785,   1677,   1575,   1478,   1388,   1302,   1221,   1145,
-     1073,   1005,    942,    881,    825,    771,    721,    674,
-      629,    587,    547,    510,    475,    442,    411,    382,
-      355,    330
-};
-
-void WebRtcNsx_UpdateNoiseEstimate(NsxInst_t *inst, int offset)
-{
-    WebRtc_Word32 tmp32no1 = 0;
-    WebRtc_Word32 tmp32no2 = 0;
-
-    WebRtc_Word16 tmp16no1 = 0;
-    WebRtc_Word16 tmp16no2 = 0;
-    WebRtc_Word16 exp2Const = 11819; // Q13
-
-    int i = 0;
-
-    tmp16no2 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset, inst->magnLen);
-    inst->qNoise = 14
-            - (int)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(exp2Const, tmp16no2, 21);
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // inst->quantile[i]=exp(inst->lquantile[offset+i]);
-        // in Q21
-        tmp32no2 = WEBRTC_SPL_MUL_16_16(exp2Const, inst->noiseEstLogQuantile[offset + i]);
-        tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF));
-        tmp16no1 = -(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21);
-        tmp16no1 += 21;// shift 21 to get result in Q0
-        tmp16no1 -= (WebRtc_Word16)inst->qNoise; //shift to get result in Q(qNoise)
-        if (tmp16no1 > 0)
-        {
-            inst->noiseEstQuantile[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1 +
-                                                       kRoundTable[tmp16no1], tmp16no1);
-        }
-        else
-        {
-            inst->noiseEstQuantile[i] = (WebRtc_Word16)WEBRTC_SPL_LSHIFT_W32(tmp32no1,
-                                                                             -tmp16no1);
-        }
-    }
-}
-
-void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t *inst,
-                                           WebRtc_Word16 pink_noise_exp_avg,
-                                           WebRtc_Word32 pink_noise_num_avg,
-                                           int freq_index,
-                                           WebRtc_UWord32 *noise_estimate,
-                                           WebRtc_UWord32 *noise_estimate_avg)
-{
-    WebRtc_Word32 tmp32no1 = 0;
-    WebRtc_Word32 tmp32no2 = 0;
-
-    WebRtc_Word16 int_part = 0;
-    WebRtc_Word16 frac_part = 0;
-
-    // Use pink noise estimate
-    // noise_estimate = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(j))
-    assert(freq_index > 0);
-    tmp32no2 = WEBRTC_SPL_MUL_16_16(pink_noise_exp_avg, kLogIndex[freq_index]); // Q26
-    tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 15); // Q11
-    tmp32no1 = pink_noise_num_avg - tmp32no2; // Q11
-
-    // Calculate output: 2^tmp32no1
-    // Output in Q(minNorm-stages)
-    tmp32no1 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)(inst->minNorm - inst->stages), 11);
-    if (tmp32no1 > 0)
-    {
-        int_part = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 11);
-        frac_part = (WebRtc_Word16)(tmp32no1 & 0x000007ff); // Q11
-        // Piecewise linear approximation of 'b' in
-        // 2^(int_part+frac_part) = 2^int_part * (1 + b)
-        // 'b' is given in Q11 and below stored in frac_part.
-        if (WEBRTC_SPL_RSHIFT_W32(frac_part, 10))
-        {
-            // Upper fractional part
-            tmp32no2 = WEBRTC_SPL_MUL_32_16(2048 - frac_part, 1244); // Q21
-            tmp32no2 = 2048 - WEBRTC_SPL_RSHIFT_W32(tmp32no2, 10);
-        }
-        else
-        {
-            // Lower fractional part
-            tmp32no2 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(frac_part, 804), 10);
-        }
-        // Shift fractional part to Q(minNorm-stages)
-        tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
-        *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (WebRtc_UWord32)tmp32no2;
-        // Scale up to initMagnEst, which is not block averaged
-        *noise_estimate = (*noise_estimate_avg) * (WebRtc_UWord32)(inst->blockIndex + 1);
-    }
-}
-
-// Initialize state
-WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t *inst, WebRtc_UWord32 fs)
-{
-    int i;
-
-    //check for valid pointer
-    if (inst == NULL)
-    {
-        return -1;
-    }
-    //
-
-    // Initialization of struct
-    if (fs == 8000 || fs == 16000 || fs == 32000)
-    {
-        inst->fs = fs;
-    } else
-    {
-        return -1;
-    }
-
-    if (fs == 8000)
-    {
-        inst->blockLen10ms = 80;
-        inst->anaLen = 128;
-        inst->stages = 7;
-        inst->window = kBlocks80w128x;
-        inst->thresholdLogLrt = 131072; //default threshold for LRT feature
-        inst->maxLrt = 0x0040000;
-        inst->minLrt = 52429;
-    } else if (fs == 16000)
-    {
-        inst->blockLen10ms = 160;
-        inst->anaLen = 256;
-        inst->stages = 8;
-        inst->window = kBlocks160w256x;
-        inst->thresholdLogLrt = 212644; //default threshold for LRT feature
-        inst->maxLrt = 0x0080000;
-        inst->minLrt = 104858;
-    } else if (fs == 32000)
-    {
-        inst->blockLen10ms = 160;
-        inst->anaLen = 256;
-        inst->stages = 8;
-        inst->window = kBlocks160w256x;
-        inst->thresholdLogLrt = 212644; //default threshold for LRT feature
-        inst->maxLrt = 0x0080000;
-        inst->minLrt = 104858;
-    }
-    inst->anaLen2 = WEBRTC_SPL_RSHIFT_W16(inst->anaLen, 1);
-    inst->magnLen = inst->anaLen2 + 1;
-
-    WebRtcSpl_ZerosArrayW16(inst->analysisBuffer, ANAL_BLOCKL_MAX);
-    WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer, ANAL_BLOCKL_MAX);
-
-    // for HB processing
-    WebRtcSpl_ZerosArrayW16(inst->dataBufHBFX, ANAL_BLOCKL_MAX);
-    // for quantile noise estimation
-    WebRtcSpl_ZerosArrayW16(inst->noiseEstQuantile, HALF_ANAL_BLOCKL);
-    for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++)
-    {
-        inst->noiseEstLogQuantile[i] = 2048; // Q8
-        inst->noiseEstDensity[i] = 153; // Q9
-    }
-    for (i = 0; i < SIMULT; i++)
-    {
-        inst->noiseEstCounter[i] = (WebRtc_Word16)(END_STARTUP_LONG * (i + 1)) / SIMULT;
-    }
-
-    // Initialize suppression filter with ones
-    WebRtcSpl_MemSetW16((WebRtc_Word16*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL);
-
-    // Set the aggressiveness: default
-    inst->aggrMode = 0;
-
-    //initialize variables for new method
-    inst->priorNonSpeechProb = 8192; // Q14(0.5) prior probability for speech/noise
-    for (i = 0; i < HALF_ANAL_BLOCKL; i++)
-    {
-        inst->prevMagnU16[i] = 0;
-        inst->prevNoiseU32[i] = 0; //previous noise-spectrum
-        inst->logLrtTimeAvgW32[i] = 0; //smooth LR ratio
-        inst->avgMagnPause[i] = 0; //conservative noise spectrum estimate
-        inst->initMagnEst[i] = 0; //initial average magnitude spectrum
-    }
-
-    //feature quantities
-    inst->thresholdSpecDiff = 50; //threshold for difference feature: determined on-line
-    inst->thresholdSpecFlat = 20480; //threshold for flatness: determined on-line
-    inst->featureLogLrt = inst->thresholdLogLrt; //average LRT factor (= threshold)
-    inst->featureSpecFlat = inst->thresholdSpecFlat; //spectral flatness (= threshold)
-    inst->featureSpecDiff = inst->thresholdSpecDiff; //spectral difference (= threshold)
-    inst->weightLogLrt = 6; //default weighting par for LRT feature
-    inst->weightSpecFlat = 0; //default weighting par for spectral flatness feature
-    inst->weightSpecDiff = 0; //default weighting par for spectral difference feature
-
-    inst->curAvgMagnEnergy = 0; //window time-average of input magnitude spectrum
-    inst->timeAvgMagnEnergy = 0; //normalization for spectral difference
-    inst->timeAvgMagnEnergyTmp = 0; //normalization for spectral difference
-
-    //histogram quantities: used to estimate/update thresholds for features
-    WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
-    WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
-    WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
-
-    inst->blockIndex = -1; //frame counter
-
-    //inst->modelUpdate    = 500;   //window for update
-    inst->modelUpdate = (1 << STAT_UPDATES); //window for update
-    inst->cntThresUpdate = 0; //counter feature thresholds updates
-
-    inst->sumMagn = 0;
-    inst->magnEnergy = 0;
-    inst->prevQMagn = 0;
-    inst->qNoise = 0;
-    inst->prevQNoise = 0;
-
-    inst->energyIn = 0;
-    inst->scaleEnergyIn = 0;
-
-    inst->whiteNoiseLevel = 0;
-    inst->pinkNoiseNumerator = 0;
-    inst->pinkNoiseExp = 0;
-    inst->minNorm = 15; // Start with full scale
-    inst->zeroInputSignal = 0;
-
-    //default mode
-    WebRtcNsx_set_policy_core(inst, 0);
-
-#ifdef NS_FILEDEBUG
-    inst->infile=fopen("indebug.pcm","wb");
-    inst->outfile=fopen("outdebug.pcm","wb");
-    inst->file1=fopen("file1.pcm","wb");
-    inst->file2=fopen("file2.pcm","wb");
-    inst->file3=fopen("file3.pcm","wb");
-    inst->file4=fopen("file4.pcm","wb");
-    inst->file5=fopen("file5.pcm","wb");
-#endif
-
-    inst->initFlag = 1;
-
-    return 0;
-}
-
-int WebRtcNsx_set_policy_core(NsxInst_t *inst, int mode)
-{
-    // allow for modes:0,1,2,3
-    if (mode < 0 || mode > 3)
-    {
-        return -1;
-    }
-
-    inst->aggrMode = mode;
-    if (mode == 0)
-    {
-        inst->overdrive = 256; // Q8(1.0)
-        inst->denoiseBound = 8192; // Q14(0.5)
-        inst->gainMap = 0; // No gain compensation
-    } else if (mode == 1)
-    {
-        inst->overdrive = 256; // Q8(1.0)
-        inst->denoiseBound = 4096; // Q14(0.25)
-        inst->factor2Table = kFactor2Aggressiveness1;
-        inst->gainMap = 1;
-    } else if (mode == 2)
-    {
-        inst->overdrive = 282; // ~= Q8(1.1)
-        inst->denoiseBound = 2048; // Q14(0.125)
-        inst->factor2Table = kFactor2Aggressiveness2;
-        inst->gainMap = 1;
-    } else if (mode == 3)
-    {
-        inst->overdrive = 320; // Q8(1.25)
-        inst->denoiseBound = 1475; // ~= Q14(0.09)
-        inst->factor2Table = kFactor2Aggressiveness3;
-        inst->gainMap = 1;
-    }
-    return 0;
-}
-
-void WebRtcNsx_NoiseEstimation(NsxInst_t *inst, WebRtc_UWord16 *magn, WebRtc_UWord32 *noise,
-                               WebRtc_Word16 *qNoise)
-{
-    WebRtc_Word32 numerator;
-
-    WebRtc_Word16 lmagn[HALF_ANAL_BLOCKL], counter, countDiv, countProd, delta, zeros, frac;
-    WebRtc_Word16 log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
-    WebRtc_Word16 log2Const = 22713; // Q15
-    WebRtc_Word16 widthFactor = 21845;
-
-    int i, s, offset;
-
-    numerator = FACTOR_Q16;
-
-    tabind = inst->stages - inst->normData;
-    if (tabind < 0)
-    {
-        logval = -kLogTable[-tabind];
-    } else
-    {
-        logval = kLogTable[tabind];
-    }
-
-    // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
-    // magn is in Q(-stages), and the real lmagn values are:
-    // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
-    // lmagn in Q8
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        if (magn[i])
-        {
-            zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]);
-            frac = (WebRtc_Word16)((((WebRtc_UWord32)magn[i] << zeros) & 0x7FFFFFFF) >> 23);
-            // log2(magn(i))
-            log2 = (WebRtc_Word16)(((31 - zeros) << 8) + kLogTableFrac[frac]);
-            // log2(magn(i))*log(2)
-            lmagn[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2Const, 15);
-            // + log(2^stages)
-            lmagn[i] += logval;
-        } else
-        {
-            lmagn[i] = logval;//0;
-        }
-    }
-
-    // loop over simultaneous estimates
-    for (s = 0; s < SIMULT; s++)
-    {
-        offset = s * inst->magnLen;
-
-        // Get counter values from state
-        counter = inst->noiseEstCounter[s];
-        countDiv = kCounterDiv[counter];
-        countProd = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(counter, countDiv);
-
-        // quant_est(...)
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // compute delta
-            if (inst->noiseEstDensity[offset + i] > 512)
-            {
-                delta = WebRtcSpl_DivW32W16ResW16(numerator,
-                                                  inst->noiseEstDensity[offset + i]);
-            } else
-            {
-                delta = FACTOR_Q7;
-            }
-
-            // update log quantile estimate
-            tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
-            if (lmagn[i] > inst->noiseEstLogQuantile[offset + i])
-            {
-                // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
-                // CounterDiv=1/inst->counter[s] in Q15
-                tmp16 += 2;
-                tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2);
-                inst->noiseEstLogQuantile[offset + i] += tmp16no1;
-            } else
-            {
-                tmp16 += 1;
-                tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1);
-                // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
-                tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1);
-                inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
-            }
-
-            // update density estimate
-            if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
-                    < WIDTH_Q8)
-            {
-                tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
-                        inst->noiseEstDensity[offset + i], countProd, 15);
-                tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(widthFactor,
-                                                                               countDiv, 15);
-                inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
-            }
-        } // end loop over magnitude spectrum
-
-        if (counter >= END_STARTUP_LONG)
-        {
-            inst->noiseEstCounter[s] = 0;
-            if (inst->blockIndex >= END_STARTUP_LONG)
-            {
-                WebRtcNsx_UpdateNoiseEstimate(inst, offset);
-            }
-        }
-        inst->noiseEstCounter[s]++;
-
-    } // end loop over simultaneous estimates
-
-    // Sequentially update the noise during startup
-    if (inst->blockIndex < END_STARTUP_LONG)
-    {
-        WebRtcNsx_UpdateNoiseEstimate(inst, offset);
-    }
-
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        noise[i] = (WebRtc_UWord32)(inst->noiseEstQuantile[i]); // Q(qNoise)
-    }
-    (*qNoise) = (WebRtc_Word16)inst->qNoise;
-}
-
-// Extract thresholds for feature parameters
-// histograms are computed over some window_size (given by window_pars)
-// thresholds and weights are extracted every window
-// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
-// threshold and weights are returned in: inst->priorModelPars
-void WebRtcNsx_FeatureParameterExtraction(NsxInst_t *inst, int flag)
-{
-    WebRtc_UWord32 tmpU32;
-    WebRtc_UWord32 histIndex;
-    WebRtc_UWord32 posPeak1SpecFlatFX, posPeak2SpecFlatFX;
-    WebRtc_UWord32 posPeak1SpecDiffFX, posPeak2SpecDiffFX;
-
-    WebRtc_Word32 tmp32;
-    WebRtc_Word32 fluctLrtFX, thresFluctLrtFX;
-    WebRtc_Word32 avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX;
-
-    WebRtc_Word16 j;
-    WebRtc_Word16 numHistLrt;
-
-    int i;
-    int useFeatureSpecFlat, useFeatureSpecDiff, featureSum;
-    int maxPeak1, maxPeak2;
-    int weightPeak1SpecFlat, weightPeak2SpecFlat;
-    int weightPeak1SpecDiff, weightPeak2SpecDiff;
-
-    //update histograms
-    if (!flag)
-    {
-        // LRT
-        // Type casting to UWord32 is safe since negative values will not be wrapped to larger
-        // values than HIST_PAR_EST
-        histIndex = (WebRtc_UWord32)(inst->featureLogLrt);
-        if (histIndex < HIST_PAR_EST)
-        {
-            inst->histLrt[histIndex]++;
-        }
-        // Spectral flatness
-        // (inst->featureSpecFlat*20)>>10 = (inst->featureSpecFlat*5)>>8
-        histIndex = WEBRTC_SPL_RSHIFT_U32(inst->featureSpecFlat * 5, 8);
-        if (histIndex < HIST_PAR_EST)
-        {
-            inst->histSpecFlat[histIndex]++;
-        }
-        // Spectral difference
-        histIndex = HIST_PAR_EST;
-        if (inst->timeAvgMagnEnergy)
-        {
-            // Guard against division by zero
-            // If timeAvgMagnEnergy == 0 we have no normalizing statistics and therefore can't
-            // update the histogram
-            histIndex = WEBRTC_SPL_UDIV((inst->featureSpecDiff * 5) >> inst->stages,
-                                        inst->timeAvgMagnEnergy);
-        }
-        if (histIndex < HIST_PAR_EST)
-        {
-            inst->histSpecDiff[histIndex]++;
-        }
-    }
-
-    // extract parameters for speech/noise probability
-    if (flag)
-    {
-        useFeatureSpecDiff = 1;
-        //for LRT feature:
-        // compute the average over inst->featureExtractionParams.rangeAvgHistLrt
-        avgHistLrtFX = 0;
-        avgSquareHistLrtFX = 0;
-        numHistLrt = 0;
-        for (i = 0; i < BIN_SIZE_LRT; i++)
-        {
-            j = (2 * i + 1);
-            tmp32 = WEBRTC_SPL_MUL_16_16(inst->histLrt[i], j);
-            avgHistLrtFX += tmp32;
-            numHistLrt += inst->histLrt[i];
-            avgSquareHistLrtFX += WEBRTC_SPL_MUL_32_16(tmp32, j);
-        }
-        avgHistLrtComplFX = avgHistLrtFX;
-        for (; i < HIST_PAR_EST; i++)
-        {
-            j = (2 * i + 1);
-            tmp32 = WEBRTC_SPL_MUL_16_16(inst->histLrt[i], j);
-            avgHistLrtComplFX += tmp32;
-            avgSquareHistLrtFX += WEBRTC_SPL_MUL_32_16(tmp32, j);
-        }
-        fluctLrtFX = WEBRTC_SPL_MUL(avgSquareHistLrtFX, numHistLrt);
-        fluctLrtFX -= WEBRTC_SPL_MUL(avgHistLrtFX, avgHistLrtComplFX);
-        thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt;
-        // get threshold for LRT feature:
-        tmpU32 = (FACTOR_1_LRT_DIFF * (WebRtc_UWord32)avgHistLrtFX);
-        if ((fluctLrtFX < thresFluctLrtFX) || (numHistLrt == 0) || (tmpU32
-                > (WebRtc_UWord32)(100 * numHistLrt)))
-        {
-            inst->thresholdLogLrt = inst->maxLrt; //very low fluctuation, so likely noise
-        } else
-        {
-            tmp32 = (WebRtc_Word32)((tmpU32 << (9 + inst->stages)) / numHistLrt / 25);
-            // check if value is within min/max range
-            inst->thresholdLogLrt = WEBRTC_SPL_SAT(inst->maxLrt, tmp32, inst->minLrt);
-        }
-        if (fluctLrtFX < thresFluctLrtFX)
-        {
-            // Do not use difference feature if fluctuation of LRT feature is very low:
-            // most likely just noise state
-            useFeatureSpecDiff = 0;
-        }
-
-        // for spectral flatness and spectral difference: compute the main peaks of histogram
-        maxPeak1 = 0;
-        maxPeak2 = 0;
-        posPeak1SpecFlatFX = 0;
-        posPeak2SpecFlatFX = 0;
-        weightPeak1SpecFlat = 0;
-        weightPeak2SpecFlat = 0;
-
-        // peaks for flatness
-        for (i = 0; i < HIST_PAR_EST; i++)
-        {
-            if (inst->histSpecFlat[i] > maxPeak1)
-            {
-                // Found new "first" peak
-                maxPeak2 = maxPeak1;
-                weightPeak2SpecFlat = weightPeak1SpecFlat;
-                posPeak2SpecFlatFX = posPeak1SpecFlatFX;
-
-                maxPeak1 = inst->histSpecFlat[i];
-                weightPeak1SpecFlat = inst->histSpecFlat[i];
-                posPeak1SpecFlatFX = (WebRtc_UWord32)(2 * i + 1);
-            } else if (inst->histSpecFlat[i] > maxPeak2)
-            {
-                // Found new "second" peak
-                maxPeak2 = inst->histSpecFlat[i];
-                weightPeak2SpecFlat = inst->histSpecFlat[i];
-                posPeak2SpecFlatFX = (WebRtc_UWord32)(2 * i + 1);
-            }
-        }
-
-        // for spectral flatness feature
-        useFeatureSpecFlat = 1;
-        // merge the two peaks if they are close
-        if ((posPeak1SpecFlatFX - posPeak2SpecFlatFX < LIM_PEAK_SPACE_FLAT_DIFF)
-                && (weightPeak2SpecFlat * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecFlat))
-        {
-            weightPeak1SpecFlat += weightPeak2SpecFlat;
-            posPeak1SpecFlatFX = (posPeak1SpecFlatFX + posPeak2SpecFlatFX) >> 1;
-        }
-        //reject if weight of peaks is not large enough, or peak value too small
-        if (weightPeak1SpecFlat < THRES_WEIGHT_FLAT_DIFF || posPeak1SpecFlatFX
-                < THRES_PEAK_FLAT)
-        {
-            useFeatureSpecFlat = 0;
-        } else // if selected, get the threshold
-        {
-            // compute the threshold and check if value is within min/max range
-            inst->thresholdSpecFlat = WEBRTC_SPL_SAT(MAX_FLAT_Q10, FACTOR_2_FLAT_Q10
-                                                     * posPeak1SpecFlatFX, MIN_FLAT_Q10); //Q10
-        }
-        // done with flatness feature
-
-        if (useFeatureSpecDiff)
-        {
-            //compute two peaks for spectral difference
-            maxPeak1 = 0;
-            maxPeak2 = 0;
-            posPeak1SpecDiffFX = 0;
-            posPeak2SpecDiffFX = 0;
-            weightPeak1SpecDiff = 0;
-            weightPeak2SpecDiff = 0;
-            // peaks for spectral difference
-            for (i = 0; i < HIST_PAR_EST; i++)
-            {
-                if (inst->histSpecDiff[i] > maxPeak1)
-                {
-                    // Found new "first" peak
-                    maxPeak2 = maxPeak1;
-                    weightPeak2SpecDiff = weightPeak1SpecDiff;
-                    posPeak2SpecDiffFX = posPeak1SpecDiffFX;
-
-                    maxPeak1 = inst->histSpecDiff[i];
-                    weightPeak1SpecDiff = inst->histSpecDiff[i];
-                    posPeak1SpecDiffFX = (WebRtc_UWord32)(2 * i + 1);
-                } else if (inst->histSpecDiff[i] > maxPeak2)
-                {
-                    // Found new "second" peak
-                    maxPeak2 = inst->histSpecDiff[i];
-                    weightPeak2SpecDiff = inst->histSpecDiff[i];
-                    posPeak2SpecDiffFX = (WebRtc_UWord32)(2 * i + 1);
-                }
-            }
-
-            // merge the two peaks if they are close
-            if ((posPeak1SpecDiffFX - posPeak2SpecDiffFX < LIM_PEAK_SPACE_FLAT_DIFF)
-                    && (weightPeak2SpecDiff * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecDiff))
-            {
-                weightPeak1SpecDiff += weightPeak2SpecDiff;
-                posPeak1SpecDiffFX = (posPeak1SpecDiffFX + posPeak2SpecDiffFX) >> 1;
-            }
-            // get the threshold value and check if value is within min/max range
-            inst->thresholdSpecDiff = WEBRTC_SPL_SAT(MAX_DIFF, FACTOR_1_LRT_DIFF
-                                                     * posPeak1SpecDiffFX, MIN_DIFF); //5x bigger
-            //reject if weight of peaks is not large enough
-            if (weightPeak1SpecDiff < THRES_WEIGHT_FLAT_DIFF)
-            {
-                useFeatureSpecDiff = 0;
-            }
-            // done with spectral difference feature
-        }
-
-        // select the weights between the features
-        // inst->priorModelPars[4] is weight for LRT: always selected
-        featureSum = 6 / (1 + useFeatureSpecFlat + useFeatureSpecDiff);
-        inst->weightLogLrt = featureSum;
-        inst->weightSpecFlat = useFeatureSpecFlat * featureSum;
-        inst->weightSpecDiff = useFeatureSpecDiff * featureSum;
-
-        // set histograms to zero for next update
-        WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
-        WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
-        WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
-    } // end of flag == 1
-}
-
-
-// Compute spectral flatness on input spectrum
-// magn is the magnitude spectrum
-// spectral flatness is returned in inst->featureSpecFlat
-void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t *inst, WebRtc_UWord16 *magn)
-{
-    WebRtc_UWord32 tmpU32;
-    WebRtc_UWord32 avgSpectralFlatnessNum, avgSpectralFlatnessDen;
-
-    WebRtc_Word32 tmp32;
-    WebRtc_Word32 currentSpectralFlatness, logCurSpectralFlatness;
-
-    WebRtc_Word16 zeros, frac, intPart;
-
-    int i;
-
-    // for flatness
-    avgSpectralFlatnessNum = 0;
-    avgSpectralFlatnessDen = inst->sumMagn - (WebRtc_UWord32)magn[0]; // Q(normData-stages)
-
-    // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
-    // flatness = exp( sum(log(magn[i]))/N - log(sum(magn[i])/N) )
-    //          = exp( sum(log(magn[i]))/N ) * N / sum(magn[i])
-    //          = 2^( sum(log2(magn[i]))/N - (log2(sum(magn[i])) - log2(N)) ) [This is used]
-    for (i = 1; i < inst->magnLen; i++)
-    {
-        // First bin is excluded from spectrum measures. Number of bins is now a power of 2
-        if (magn[i])
-        {
-            zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]);
-            frac = (WebRtc_Word16)(((WebRtc_UWord32)((WebRtc_UWord32)(magn[i]) << zeros)
-                    & 0x7FFFFFFF) >> 23);
-            // log2(magn(i))
-            tmpU32 = (WebRtc_UWord32)(((31 - zeros) << 8) + kLogTableFrac[frac]); // Q8
-            avgSpectralFlatnessNum += tmpU32; // Q8
-        } else
-        {
-            //if at least one frequency component is zero, treat separately
-            tmpU32 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecFlat, SPECT_FLAT_TAVG_Q14); // Q24
-            inst->featureSpecFlat -= WEBRTC_SPL_RSHIFT_U32(tmpU32, 14); // Q10
-            return;
-        }
-    }
-    //ratio and inverse log: check for case of log(0)
-    zeros = WebRtcSpl_NormU32(avgSpectralFlatnessDen);
-    frac = (WebRtc_Word16)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23);
-    // log2(avgSpectralFlatnessDen)
-    tmp32 = (WebRtc_Word32)(((31 - zeros) << 8) + kLogTableFrac[frac]); // Q8
-    logCurSpectralFlatness = (WebRtc_Word32)avgSpectralFlatnessNum;
-    logCurSpectralFlatness += ((WebRtc_Word32)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1)
-    logCurSpectralFlatness -= (tmp32 << (inst->stages - 1));
-    logCurSpectralFlatness = WEBRTC_SPL_LSHIFT_W32(logCurSpectralFlatness, 10 - inst->stages); // Q17
-    tmp32 = (WebRtc_Word32)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness)
-            & 0x0001FFFF)); //Q17
-    intPart = -(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(logCurSpectralFlatness, 17);
-    intPart += 7; // Shift 7 to get the output in Q10 (from Q17 = -17+10)
-    if (intPart > 0)
-    {
-        currentSpectralFlatness = WEBRTC_SPL_RSHIFT_W32(tmp32, intPart);
-    } else
-    {
-        currentSpectralFlatness = WEBRTC_SPL_LSHIFT_W32(tmp32, -intPart);
-    }
-
-    //time average update of spectral flatness feature
-    tmp32 = currentSpectralFlatness - (WebRtc_Word32)inst->featureSpecFlat; // Q10
-    tmp32 = WEBRTC_SPL_MUL_32_16(SPECT_FLAT_TAVG_Q14, tmp32); // Q24
-    inst->featureSpecFlat = (WebRtc_UWord32)((WebRtc_Word32)inst->featureSpecFlat
-            + WEBRTC_SPL_RSHIFT_W32(tmp32, 14)); // Q10
-    // done with flatness feature
-}
-
-
-// Compute the difference measure between input spectrum and a template/learned noise spectrum
-// magn_tmp is the input spectrum
-// the reference/template spectrum is  inst->magn_avg_pause[i]
-// returns (normalized) spectral difference in inst->featureSpecDiff
-void WebRtcNsx_ComputeSpectralDifference(NsxInst_t *inst, WebRtc_UWord16 *magnIn)
-{
-    // This is to be calculated:
-    // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
-
-    WebRtc_UWord32 tmpU32no1, tmpU32no2;
-    WebRtc_UWord32 varMagnUFX, varPauseUFX, avgDiffNormMagnUFX;
-
-    WebRtc_Word32 tmp32no1, tmp32no2;
-    WebRtc_Word32 avgPauseFX, avgMagnFX, covMagnPauseFX;
-    WebRtc_Word32 maxPause, minPause;
-
-    WebRtc_Word16 tmp16no1;
-
-    int i, norm32, nShifts;
-
-    avgPauseFX = 0;
-    maxPause = 0;
-    minPause = inst->avgMagnPause[0]; // Q(prevQMagn)
-    // compute average quantities
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // Compute mean of magn_pause
-        avgPauseFX += inst->avgMagnPause[i]; // in Q(prevQMagn)
-        maxPause = WEBRTC_SPL_MAX(maxPause, inst->avgMagnPause[i]);
-        minPause = WEBRTC_SPL_MIN(minPause, inst->avgMagnPause[i]);
-    }
-    // normalize by replacing div of "inst->magnLen" with "inst->stages-1" shifts
-    avgPauseFX = WEBRTC_SPL_RSHIFT_W32(avgPauseFX, inst->stages - 1);
-    avgMagnFX = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(inst->sumMagn, inst->stages - 1);
-    // Largest possible deviation in magnPause for (co)var calculations
-    tmp32no1 = WEBRTC_SPL_MAX(maxPause - avgPauseFX, avgPauseFX - minPause);
-    // Get number of shifts to make sure we don't get wrap around in varPause
-    nShifts = WEBRTC_SPL_MAX(0, 10 + inst->stages - WebRtcSpl_NormW32(tmp32no1));
-
-    varMagnUFX = 0;
-    varPauseUFX = 0;
-    covMagnPauseFX = 0;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // Compute var and cov of magn and magn_pause
-        tmp16no1 = (WebRtc_Word16)((WebRtc_Word32)magnIn[i] - avgMagnFX);
-        tmp32no2 = inst->avgMagnPause[i] - avgPauseFX;
-        varMagnUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1); // Q(2*qMagn)
-        tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no2, tmp16no1); // Q(prevQMagn+qMagn)
-        covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn)
-        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, nShifts); // Q(prevQMagn-minPause)
-        varPauseUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL(tmp32no1, tmp32no1); // Q(2*(prevQMagn-minPause))
-    }
-    //update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts
-    inst->curAvgMagnEnergy += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy, 2 * inst->normData
-                                                    + inst->stages - 1);
-
-    avgDiffNormMagnUFX = varMagnUFX; // Q(2*qMagn)
-    if ((varPauseUFX) && (covMagnPauseFX))
-    {
-        tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
-        norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
-        if (norm32 > 0)
-        {
-            tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32); // Q(prevQMagn+qMagn+norm32)
-        } else
-        {
-            tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, -norm32); // Q(prevQMagn+qMagn+norm32)
-        }
-        tmpU32no2 = WEBRTC_SPL_UMUL(tmpU32no1, tmpU32no1); // Q(2*(prevQMagn+qMagn-norm32))
-
-        nShifts += norm32;
-        nShifts <<= 1;
-        if (nShifts < 0)
-        {
-            varPauseUFX >>= (-nShifts); // Q(2*(qMagn+norm32+minPause))
-            nShifts = 0;
-        }
-        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no2, varPauseUFX); // Q(2*(qMagn+norm32-16+minPause))
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, nShifts);
-
-        avgDiffNormMagnUFX -= WEBRTC_SPL_MIN(avgDiffNormMagnUFX, tmpU32no1); // Q(2*qMagn)
-    }
-    //normalize and compute time average update of difference feature
-    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(avgDiffNormMagnUFX, 2 * inst->normData);
-    if (inst->featureSpecDiff > tmpU32no1)
-    {
-        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecDiff - tmpU32no1,
-                                          SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
-        inst->featureSpecDiff -= WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 8); // Q(-2*stages)
-    } else
-    {
-        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no1 - inst->featureSpecDiff,
-                                          SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
-        inst->featureSpecDiff += WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 8); // Q(-2*stages)
-    }
-}
-
-// Compute speech/noise probability
-// speech/noise probability is returned in: probSpeechFinal
-//snrLocPrior is the prior SNR for each frequency (in Q11)
-//snrLocPost is the post SNR for each frequency (in Q11)
-void WebRtcNsx_SpeechNoiseProb(NsxInst_t *inst, WebRtc_UWord16 *nonSpeechProbFinal,
-                               WebRtc_UWord32 *priorLocSnr, WebRtc_UWord32 *postLocSnr)
-{
-    WebRtc_UWord32 zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
-
-    WebRtc_Word32 invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
-    WebRtc_Word32 frac32, logTmp;
-    WebRtc_Word32 logLrtTimeAvgKsumFX;
-
-    WebRtc_Word16 indPriorFX16;
-    WebRtc_Word16 tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
-
-    int i, normTmp, normTmp2, nShifts;
-
-    // compute feature based on average LR factor
-    // this is the average over all frequencies of the smooth log LRT
-    logLrtTimeAvgKsumFX = 0;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        besselTmpFX32 = (WebRtc_Word32)postLocSnr[i]; // Q11
-        normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
-        num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
-        if (normTmp > 10)
-        {
-            den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
-        } else
-        {
-            den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
-        }
-        besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
-
-        // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior) - inst->logLrtTimeAvg[i]);
-        // Here, LRT_TAVG = 0.5
-        zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
-        frac32 = (WebRtc_Word32)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
-        tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
-        tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
-        tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)frac32, 5412, 12);
-        frac32 = tmp32 + 37;
-        // tmp32 = log2(priorLocSnr[i])
-        tmp32 = (WebRtc_Word32)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
-        logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8); // log2(priorLocSnr[i])*log(2)
-        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1); // Q12
-        inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12
-
-        logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
-    }
-    inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5, inst->stages + 10); // 5 = BIN_SIZE_LRT / 2
-    // done with computation of LR factor
-
-    //
-    //compute the indicator functions
-    //
-
-    // average LRT feature
-    // FLOAT code
-    // indicator0 = 0.5 * (tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
-    tmpIndFX = 16384; // Q14(1.0)
-    tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
-    nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
-    //use larger width in tanh map for pause regions
-    if (tmp32no1 < 0)
-    {
-        tmpIndFX = 0;
-        tmp32no1 = -tmp32no1;
-        //widthPrior = widthPrior * 2.0;
-        nShifts++;
-    }
-    tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
-    // compute indicator function: sigmoid map
-    tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
-    if ((tableIndex < 16) && (tableIndex >= 0))
-    {
-        tmp16no2 = kIndicatorTable[tableIndex];
-        tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
-        frac = (WebRtc_Word16)(tmp32no1 & 0x00003fff); // Q14
-        tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
-        if (tmpIndFX == 0)
-        {
-            tmpIndFX = 8192 - tmp16no2; // Q14
-        } else
-        {
-            tmpIndFX = 8192 + tmp16no2; // Q14
-        }
-    }
-    indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14
-
-    //spectral flatness feature
-    if (inst->weightSpecFlat)
-    {
-        tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
-        tmpIndFX = 16384; // Q14(1.0)
-        //use larger width in tanh map for pause regions
-        tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
-        nShifts = 4;
-        if (inst->thresholdSpecFlat < tmpU32no1)
-        {
-            tmpIndFX = 0;
-            tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
-            //widthPrior = widthPrior * 2.0;
-            nShifts++;
-        }
-        tmp32no1 = (WebRtc_Word32)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
-                                                                            nShifts), 25); //Q14
-        tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts), 25); //Q14
-        // compute indicator function: sigmoid map
-        // FLOAT code
-        // indicator1 = 0.5 * (tanh(sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) + 1.0);
-        tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
-        if (tableIndex < 16)
-        {
-            tmp16no2 = kIndicatorTable[tableIndex];
-            tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
-            frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14
-            tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
-            if (tmpIndFX)
-            {
-                tmpIndFX = 8192 + tmp16no2; // Q14
-            } else
-            {
-                tmpIndFX = 8192 - tmp16no2; // Q14
-            }
-        }
-        indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
-    }
-
-    //for template spectral-difference
-    if (inst->weightSpecDiff)
-    {
-        tmpU32no1 = 0;
-        if (inst->featureSpecDiff)
-        {
-            normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
-                                     WebRtcSpl_NormU32(inst->featureSpecDiff));
-            tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp); // Q(normTmp-2*stages)
-            tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy, 20 - inst->stages
-                                              - normTmp);
-            if (tmpU32no2)
-            {
-                tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q14?? Q(20 - inst->stages)
-            } else
-            {
-                tmpU32no1 = (WebRtc_UWord32)(0x7fffffff);
-            }
-        }
-        tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff, 17), 25);
-        tmpU32no2 = tmpU32no1 - tmpU32no3;
-        nShifts = 1;
-        tmpIndFX = 16384; // Q14(1.0)
-        //use larger width in tanh map for pause regions
-        if (tmpU32no2 & 0x80000000)
-        {
-            tmpIndFX = 0;
-            tmpU32no2 = tmpU32no3 - tmpU32no1;
-            //widthPrior = widthPrior * 2.0;
-            nShifts--;
-        }
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
-        // compute indicator function: sigmoid map
-        /* FLOAT code
-         indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
-         */
-        tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
-        if (tableIndex < 16)
-        {
-            tmp16no2 = kIndicatorTable[tableIndex];
-            tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
-            frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14
-            tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16no1, frac,
-                                                                            14);
-            if (tmpIndFX)
-            {
-                tmpIndFX = 8192 + tmp16no2;
-            } else
-            {
-                tmpIndFX = 8192 - tmp16no2;
-            }
-        }
-        indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
-    }
-
-    //combine the indicator function with the feature weights
-    // FLOAT code
-    // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2 * indicator2);
-    indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
-    // done with computing indicator function
-
-    //compute the prior probability
-    // FLOAT code
-    // inst->priorNonSpeechProb += PRIOR_UPDATE * (indPriorNonSpeech - inst->priorNonSpeechProb);
-    tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
-    inst->priorNonSpeechProb += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(PRIOR_UPDATE_Q14,
-                                                                         tmp16, 14); // Q14
-
-    //final speech probability: combine prior model with LR factor:
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // FLOAT code
-        // invLrt = exp(inst->logLrtTimeAvg[i]);
-        // invLrt = inst->priorSpeechProb * invLrt;
-        // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) / (1.0 - inst->priorSpeechProb + invLrt);
-        // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
-        // nonSpeechProbFinal[i] = inst->priorNonSpeechProb / (inst->priorNonSpeechProb + invLrt);
-        nonSpeechProbFinal[i] = 0; // Q8
-        if ((inst->logLrtTimeAvgW32[i] < 65300) && (inst->priorNonSpeechProb > 0))
-        {
-            tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(inst->logLrtTimeAvgW32[i], 23637),
-                                             14); // Q12
-            intPart = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
-            if (intPart < -8)
-            {
-                intPart = -8;
-            }
-            frac = (WebRtc_Word16)(tmp32no1 & 0x00000fff); // Q12
-            // Quadratic approximation of 2^frac
-            tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
-            tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
-            invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
-                    + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8
-
-            normTmp = WebRtcSpl_NormW32(invLrtFX);
-            normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
-            if (normTmp + normTmp2 < 15)
-            {
-                invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp); // Q(normTmp+normTmp2-7)
-                tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb)); // Q(normTmp+normTmp2+7)
-                invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2); // Q14
-            } else
-            {
-                tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb)); // Q22
-                invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
-            }
-
-            tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)inst->priorNonSpeechProb, 8); // Q22
-            nonSpeechProbFinal[i] = (WebRtc_UWord16)WEBRTC_SPL_DIV(tmp32no1,
-                    (WebRtc_Word32)inst->priorNonSpeechProb
-                    + invLrtFX); // Q8
-            if (7 - normTmp - normTmp2 > 0)
-            {
-                nonSpeechProbFinal[i] = 0; // Q8
-            }
-        }
-    }
-}
-
-// Transform input (speechFrame) to frequency domain magnitude (magnU16)
-void WebRtcNsx_DataAnalysis(NsxInst_t *inst, short *speechFrame, WebRtc_UWord16 *magnU16)
-{
-
-    WebRtc_UWord32 tmpU32no1, tmpU32no2;
-
-    WebRtc_Word32   tmp_1_w32 = 0;
-    WebRtc_Word32   tmp_2_w32 = 0;
-    WebRtc_Word32   sum_log_magn = 0;
-    WebRtc_Word32   sum_log_i_log_magn = 0;
-
-    WebRtc_UWord16  sum_log_magn_u16 = 0;
-    WebRtc_UWord16  tmp_u16 = 0;
-
-    WebRtc_Word16   sum_log_i = 0;
-    WebRtc_Word16   sum_log_i_square = 0;
-    WebRtc_Word16   frac = 0;
-    WebRtc_Word16   log2 = 0;
-    WebRtc_Word16   matrix_determinant = 0;
-    WebRtc_Word16   winData[ANAL_BLOCKL_MAX], maxWinData;
-    WebRtc_Word16   realImag[ANAL_BLOCKL_MAX << 1];
-
-    int i, j;
-    int outCFFT;
-    int zeros;
-    int net_norm = 0;
-    int right_shifts_in_magnU16 = 0;
-    int right_shifts_in_initMagnEst = 0;
-
-    // For lower band do all processing
-    // update analysis buffer for L band
-    WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
-                          inst->anaLen - inst->blockLen10ms);
-    WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms,
-                          speechFrame, inst->blockLen10ms);
-
-    // Window data before FFT
-    for (i = 0; i < inst->anaLen; i++)
-    {
-        winData[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(inst->window[i],
-                inst->analysisBuffer[i],
-                14); // Q0
-    }
-    // Get input energy
-    inst->energyIn = WebRtcSpl_Energy(winData, (int)inst->anaLen, &(inst->scaleEnergyIn));
-
-    // Reset zero input flag
-    inst->zeroInputSignal = 0;
-    // Acquire norm for winData
-    maxWinData = WebRtcSpl_MaxAbsValueW16(winData, inst->anaLen);
-    inst->normData = WebRtcSpl_NormW16(maxWinData);
-    if (maxWinData == 0)
-    {
-        // Treat zero input separately.
-        inst->zeroInputSignal = 1;
-        return;
-    }
-
-    // Determine the net normalization in the frequency domain
-    net_norm = inst->stages - inst->normData;
-    // Track lowest normalization factor and use it to prevent wrap around in shifting
-    right_shifts_in_magnU16 = inst->normData - inst->minNorm;
-    right_shifts_in_initMagnEst = WEBRTC_SPL_MAX(-right_shifts_in_magnU16, 0);
-    inst->minNorm -= right_shifts_in_initMagnEst;
-    right_shifts_in_magnU16 = WEBRTC_SPL_MAX(right_shifts_in_magnU16, 0);
-
-    // create realImag as winData interleaved with zeros (= imag. part), normalize it
-    for (i = 0; i < inst->anaLen; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W16(i, 1);
-        realImag[j] = WEBRTC_SPL_LSHIFT_W16(winData[i], inst->normData); // Q(normData)
-        realImag[j + 1] = 0; // Insert zeros in imaginary part
-    }
-
-    // bit-reverse position of elements in array and FFT the array
-    WebRtcSpl_ComplexBitReverse(realImag, inst->stages); // Q(normData-stages)
-    outCFFT = WebRtcSpl_ComplexFFT(realImag, inst->stages, 1);
-
-    inst->imag[0] = 0; // Q(normData-stages)
-    inst->imag[inst->anaLen2] = 0;
-    inst->real[0] = realImag[0]; // Q(normData-stages)
-    inst->real[inst->anaLen2] = realImag[inst->anaLen];
-    // Q(2*(normData-stages))
-    inst->magnEnergy = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[0], inst->real[0]);
-    inst->magnEnergy += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[inst->anaLen2],
-                                                             inst->real[inst->anaLen2]);
-    magnU16[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages)
-    magnU16[inst->anaLen2] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]);
-    inst->sumMagn = (WebRtc_UWord32)magnU16[0]; // Q(normData-stages)
-    inst->sumMagn += (WebRtc_UWord32)magnU16[inst->anaLen2];
-
-    // Gather information during startup for noise parameter estimation
-    if (inst->blockIndex < END_STARTUP_SHORT)
-    {
-        // Switch initMagnEst to Q(minNorm-stages)
-        inst->initMagnEst[0] = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[0],
-                                                     right_shifts_in_initMagnEst);
-        inst->initMagnEst[inst->anaLen2] =
-                WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[inst->anaLen2],
-                                      right_shifts_in_initMagnEst); // Q(minNorm-stages)
-
-        // Shift magnU16 to same domain as initMagnEst
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[0],
-                                          right_shifts_in_magnU16); // Q(minNorm-stages)
-        tmpU32no2 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[inst->anaLen2],
-                                          right_shifts_in_magnU16); // Q(minNorm-stages)
-
-        // Update initMagnEst
-        inst->initMagnEst[0] += tmpU32no1; // Q(minNorm-stages)
-        inst->initMagnEst[inst->anaLen2] += tmpU32no2; // Q(minNorm-stages)
-
-        log2 = 0;
-        if (magnU16[inst->anaLen2])
-        {
-            // Calculate log2(magnU16[inst->anaLen2])
-            zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[inst->anaLen2]);
-            frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[inst->anaLen2] << zeros) &
-                    0x7FFFFFFF) >> 23); // Q8
-            // log2(magnU16(i)) in Q8
-            log2 = (WebRtc_Word16)(((31 - zeros) << 8) + kLogTableFrac[frac]);
-        }
-
-        sum_log_magn = (WebRtc_Word32)log2; // Q8
-        // sum_log_i_log_magn in Q17
-        sum_log_i_log_magn = (WEBRTC_SPL_MUL_16_16(kLogIndex[inst->anaLen2], log2) >> 3);
-    }
-
-    for (i = 1; i < inst->anaLen2; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W16(i, 1);
-        inst->real[i] = realImag[j];
-        inst->imag[i] = -realImag[j + 1];
-        // magnitude spectrum
-        // energy in Q(2*(normData-stages))
-        tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j], realImag[j]);
-        tmpU32no1 += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j + 1], realImag[j + 1]);
-        inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages))
-
-        magnU16[i] = (WebRtc_UWord16)WebRtcSpl_Sqrt(tmpU32no1); // Q(normData-stages)
-        inst->sumMagn += (WebRtc_UWord32)magnU16[i]; // Q(normData-stages)
-        if (inst->blockIndex < END_STARTUP_SHORT)
-        {
-            // Switch initMagnEst to Q(minNorm-stages)
-            inst->initMagnEst[i] = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i],
-                                                         right_shifts_in_initMagnEst);
-
-            // Shift magnU16 to same domain as initMagnEst, i.e., Q(minNorm-stages)
-            tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[i],
-                                              right_shifts_in_magnU16);
-            // Update initMagnEst
-            inst->initMagnEst[i] += tmpU32no1; // Q(minNorm-stages)
-
-            if (i >= kStartBand)
-            {
-                // For pink noise estimation. Collect data neglecting lower frequency band
-                log2 = 0;
-                if (magnU16[i])
-                {
-                    zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[i]);
-                    frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[i] << zeros) &
-                            0x7FFFFFFF) >> 23);
-                    // log2(magnU16(i)) in Q8
-                    log2 = (WebRtc_Word16)(((31 - zeros) << 8) + kLogTableFrac[frac]);
-                }
-                sum_log_magn += (WebRtc_Word32)log2; // Q8
-                // sum_log_i_log_magn in Q17
-                sum_log_i_log_magn += (WEBRTC_SPL_MUL_16_16(kLogIndex[i], log2) >> 3);
-            }
-        }
-    }
-
-    //compute simplified noise model during startup
-    if (inst->blockIndex < END_STARTUP_SHORT)
-    {
-        // Estimate White noise
-        // Switch whiteNoiseLevel to Q(minNorm-stages)
-        inst->whiteNoiseLevel = WEBRTC_SPL_RSHIFT_U32(inst->whiteNoiseLevel,
-                                                      right_shifts_in_initMagnEst);
-
-        // Update the average magnitude spectrum, used as noise estimate.
-        tmpU32no1 = WEBRTC_SPL_UMUL_32_16(inst->sumMagn, inst->overdrive);
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, inst->stages + 8);
-
-        // Replacing division above with 'stages' shifts
-        // Shift to same Q-domain as whiteNoiseLevel
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, right_shifts_in_magnU16);
-        // This operation is safe from wrap around as long as END_STARTUP_SHORT < 128
-        assert(END_STARTUP_SHORT < 128);
-        inst->whiteNoiseLevel += tmpU32no1; // Q(minNorm-stages)
-
-        // Estimate Pink noise parameters
-        // Denominator used in both parameter estimates.
-        // The value is only dependent on the size of the frequency band (kStartBand)
-        // and to reduce computational complexity stored in a table (kDeterminantEstMatrix[])
-        matrix_determinant = kDeterminantEstMatrix[kStartBand]; // Q0
-        sum_log_i = kSumLogIndex[kStartBand]; // Q5
-        sum_log_i_square = kSumSquareLogIndex[kStartBand]; // Q2
-        if (inst->fs == 8000)
-        {
-            // Adjust values to shorter blocks in narrow band.
-            tmp_1_w32 = (WebRtc_Word32)matrix_determinant;
-            tmp_1_w32 += WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], sum_log_i, 9);
-            tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], kSumLogIndex[65], 10);
-            tmp_1_w32 -= WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)sum_log_i_square, 4);
-            tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)(inst->magnLen
-                    - kStartBand), kSumSquareLogIndex[65], 2);
-            matrix_determinant = (WebRtc_Word16)tmp_1_w32;
-            sum_log_i -= kSumLogIndex[65]; // Q5
-            sum_log_i_square -= kSumSquareLogIndex[65]; // Q2
-        }
-
-        // Necessary number of shifts to fit sum_log_magn in a word16
-        zeros = 16 - WebRtcSpl_NormW32(sum_log_magn);
-        if (zeros < 0)
-        {
-            zeros = 0;
-        }
-        tmp_1_w32 = WEBRTC_SPL_LSHIFT_W32(sum_log_magn, 1); // Q9
-        sum_log_magn_u16 = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W32(tmp_1_w32, zeros);//Q(9-zeros)
-
-        // Calculate and update pinkNoiseNumerator. Result in Q11.
-        tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i_square, sum_log_magn_u16); // Q(11-zeros)
-        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32((WebRtc_UWord32)sum_log_i_log_magn, 12); // Q5
-
-        // Shift the largest value of sum_log_i and tmp32no3 before multiplication
-        tmp_u16 = WEBRTC_SPL_LSHIFT_U16((WebRtc_UWord16)sum_log_i, 1); // Q6
-        if ((WebRtc_UWord32)sum_log_i > tmpU32no1)
-        {
-            tmp_u16 = WEBRTC_SPL_RSHIFT_U16(tmp_u16, zeros);
-        }
-        else
-        {
-            tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, zeros);
-        }
-        tmp_2_w32 -= (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros)
-        matrix_determinant = WEBRTC_SPL_RSHIFT_W16(matrix_determinant, zeros); // Q(-zeros)
-        tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11
-        tmp_2_w32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)net_norm, 11); // Q11
-        if (tmp_2_w32 < 0)
-        {
-            tmp_2_w32 = 0;
-        }
-        inst->pinkNoiseNumerator += tmp_2_w32; // Q11
-
-        // Calculate and update pinkNoiseExp. Result in Q14.
-        tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros)
-        tmp_1_w32 = WEBRTC_SPL_RSHIFT_W32(sum_log_i_log_magn, 3 + zeros);
-        tmp_1_w32 = WEBRTC_SPL_MUL((WebRtc_Word32)(inst->magnLen - kStartBand),
-                                   tmp_1_w32);
-        tmp_2_w32 -= tmp_1_w32; // Q(14-zeros)
-        if (tmp_2_w32 > 0)
-        {
-            // If the exponential parameter is negative force it to zero, which means a
-            // flat spectrum.
-            tmp_1_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q14
-            inst->pinkNoiseExp += WEBRTC_SPL_SAT(16384, tmp_1_w32, 0); // Q14
-        }
-    }
-}
-
-void WebRtcNsx_DataSynthesis(NsxInst_t *inst, short *outFrame)
-{
-    WebRtc_Word32 tmp32no1;
-    WebRtc_Word32 energyOut;
-
-    WebRtc_Word16 realImag[ANAL_BLOCKL_MAX << 1];
-    WebRtc_Word16 tmp16no1, tmp16no2;
-    WebRtc_Word16 energyRatio;
-    WebRtc_Word16 gainFactor, gainFactor1, gainFactor2;
-
-    int i, j;
-    int outCIFFT;
-    int scaleEnergyOut = 0;
-
-    if (inst->zeroInputSignal)
-    {
-        // synthesize the special case of zero input
-        // read out fully processed segment
-        for (i = 0; i < inst->blockLen10ms; i++)
-        {
-            outFrame[i] = inst->synthesisBuffer[i]; // Q0
-        }
-        // update synthesis buffer
-        WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer,
-                              inst->synthesisBuffer + inst->blockLen10ms,
-                              inst->anaLen - inst->blockLen10ms);
-        WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms,
-                                inst->blockLen10ms);
-        return;
-    }
-    // Filter the data in the frequency domain
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        inst->real[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i],
-                (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
-        inst->imag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i],
-                (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
-    }
-    // back to time domain
-    // Create spectrum
-    realImag[0] = inst->real[0];
-    realImag[1] = -inst->imag[0];
-    for (i = 1; i < inst->anaLen2; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W16(i, 1);
-        tmp16no1 = (inst->anaLen << 1) - j;
-        realImag[j] = inst->real[i];
-        realImag[j + 1] = -inst->imag[i];
-        realImag[tmp16no1] = inst->real[i];
-        realImag[tmp16no1 + 1] = inst->imag[i];
-    }
-    realImag[inst->anaLen] = inst->real[inst->anaLen2];
-    realImag[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
-
-    // bit-reverse position of elements in array and IFFT it
-    WebRtcSpl_ComplexBitReverse(realImag, inst->stages);
-    outCIFFT = WebRtcSpl_ComplexIFFT(realImag, inst->stages, 1);
-
-    for (i = 0; i < inst->anaLen; i++)
-    {
-        j = WEBRTC_SPL_LSHIFT_W16(i, 1);
-        tmp32no1 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)realImag[j], outCIFFT - inst->normData);
-        inst->real[i] = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, tmp32no1,
-                                                      WEBRTC_SPL_WORD16_MIN);
-    }
-
-    //scale factor: only do it after END_STARTUP_LONG time
-    gainFactor = 8192; // 8192 = Q13(1.0)
-    if (inst->gainMap == 1 &&
-        inst->blockIndex > END_STARTUP_LONG &&
-        inst->energyIn > 0)
-    {
-        energyOut = WebRtcSpl_Energy(inst->real, (int)inst->anaLen, &scaleEnergyOut); // Q(-scaleEnergyOut)
-        if (scaleEnergyOut == 0 && !(energyOut & 0x7f800000))
-        {
-            energyOut = WEBRTC_SPL_SHIFT_W32(energyOut, 8 + scaleEnergyOut
-                                             - inst->scaleEnergyIn);
-        } else
-        {
-            inst->energyIn = WEBRTC_SPL_RSHIFT_W32(inst->energyIn, 8 + scaleEnergyOut
-                                                   - inst->scaleEnergyIn); // Q(-8-scaleEnergyOut)
-        }
-
-        assert(inst->energyIn > 0);
-        energyRatio = (WebRtc_Word16)WEBRTC_SPL_DIV(energyOut
-                + WEBRTC_SPL_RSHIFT_W32(inst->energyIn, 1), inst->energyIn); // Q8
-
-        //         // original FLOAT code
-        //         if (gain > blim) {
-        //         factor1=1.0+1.3*(gain-blim);
-        //         if (gain*factor1 > 1.0) { // FLOAT
-        //         factor1 = 1.0/gain; // FLOAT
-        //         }
-        //         }
-        //         else {
-        //         factor1=1.0; // FLOAT
-        //         }
-        //
-        //         if (gain > blim) {
-        //         factor2=1.0; //FLOAT
-        //         }
-        //         else {
-        //         //don't reduce scale too much for pause regions: attenuation here should be controlled by flooring
-        //         factor2=1.0-0.3*(blim-gain); // FLOAT
-        //         if (gain <= inst->denoiseBound) {
-        //         factor2=1.0-0.3*(blim-inst->denoiseBound); // FLOAT
-        //         }
-        //         }
-
-        // all done in lookup tables now
-        gainFactor1 = kFactor1Table[energyRatio]; // Q8
-        gainFactor2 = inst->factor2Table[energyRatio]; // Q8
-
-        //combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent
-
-        // factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code
-        tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(16384 - inst->priorNonSpeechProb,
-                                                            gainFactor1, 14); // Q13 16384 = Q14(1.0)
-        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->priorNonSpeechProb,
-                                                            gainFactor2, 14); // Q13;
-        gainFactor = tmp16no1 + tmp16no2; // Q13
-    } // out of flag_gain_map==1
-
-    // synthesis
-    for (i = 0; i < inst->anaLen; i++)
-    {
-        tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(inst->window[i],
-                                                                       inst->real[i], 14); // Q0, window in Q14
-        tmp32no1 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16no1, gainFactor, 13); // Q0
-        // Down shift with rounding
-        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, tmp32no1,
-                                                 WEBRTC_SPL_WORD16_MIN); // Q0
-        inst->synthesisBuffer[i] = WEBRTC_SPL_ADD_SAT_W16(inst->synthesisBuffer[i], tmp16no2); // Q0
-    }
-
-    // read out fully processed segment
-    for (i = 0; i < inst->blockLen10ms; i++)
-    {
-        outFrame[i] = inst->synthesisBuffer[i]; // Q0
-    }
-    // update synthesis buffer
-    WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms,
-                          inst->anaLen - inst->blockLen10ms);
-    WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms,
-                            inst->blockLen10ms);
-}
-
-int WebRtcNsx_ProcessCore(NsxInst_t *inst, short *speechFrame, short *speechFrameHB,
-                          short *outFrame, short *outFrameHB)
-{
-    // main routine for noise suppression
-
-    WebRtc_UWord32 tmpU32no1, tmpU32no2, tmpU32no3;
-    WebRtc_UWord32 satMax, maxNoiseU32;
-    WebRtc_UWord32 tmpMagnU32, tmpNoiseU32;
-    WebRtc_UWord32 nearMagnEst;
-    WebRtc_UWord32 noiseUpdateU32;
-    WebRtc_UWord32 noiseU32[HALF_ANAL_BLOCKL];
-    WebRtc_UWord32 postLocSnr[HALF_ANAL_BLOCKL];
-    WebRtc_UWord32 priorLocSnr[HALF_ANAL_BLOCKL];
-    WebRtc_UWord32 prevNearSnr[HALF_ANAL_BLOCKL];
-    WebRtc_UWord32 curNearSnr;
-    WebRtc_UWord32 priorSnr;
-    WebRtc_UWord32 noise_estimate = 0;
-    WebRtc_UWord32 noise_estimate_avg = 0;
-    WebRtc_UWord32 numerator = 0;
-
-    WebRtc_Word32 tmp32no1, tmp32no2;
-    WebRtc_Word32 pink_noise_num_avg = 0;
-
-    WebRtc_UWord16 tmpU16no1;
-    WebRtc_UWord16 magnU16[HALF_ANAL_BLOCKL];
-    WebRtc_UWord16 prevNoiseU16[HALF_ANAL_BLOCKL];
-    WebRtc_UWord16 nonSpeechProbFinal[HALF_ANAL_BLOCKL];
-    WebRtc_UWord16 gammaNoise, prevGammaNoise;
-    WebRtc_UWord16 noiseSupFilterTmp[HALF_ANAL_BLOCKL];
-
-    WebRtc_Word16 qMagn, qNoise;
-    WebRtc_Word16 avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB;
-    WebRtc_Word16 tmp16no1;
-    WebRtc_Word16 int_part = 0;
-    WebRtc_Word16 frac_part = 0;
-    WebRtc_Word16 pink_noise_exp_avg = 0;
-
-    int i;
-    int nShifts, postShifts;
-    int norm32no1, norm32no2;
-    int flag, sign;
-    int q_domain_to_use = 0;
-
-#ifdef NS_FILEDEBUG
-    fwrite(spframe, sizeof(short), inst->blockLen10ms, inst->infile);
-#endif
-
-    // Check that initialization has been done
-    if (inst->initFlag != 1)
-    {
-        return -1;
-    }
-    // Check for valid pointers based on sampling rate
-    if ((inst->fs == 32000) && (speechFrameHB == NULL))
-    {
-        return -1;
-    }
-
-    // Store speechFrame and transform to frequency domain
-    WebRtcNsx_DataAnalysis(inst, speechFrame, magnU16);
-
-    if (inst->zeroInputSignal)
-    {
-        WebRtcNsx_DataSynthesis(inst, outFrame);
-
-        if (inst->fs == 32000)
-        {
-            // update analysis buffer for H band
-            // append new data to buffer FX
-            WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX, inst->dataBufHBFX + inst->blockLen10ms,
-                                  inst->anaLen - inst->blockLen10ms);
-            WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX + inst->anaLen - inst->blockLen10ms,
-                                  speechFrameHB, inst->blockLen10ms);
-            for (i = 0; i < inst->blockLen10ms; i++)
-            {
-                outFrameHB[i] = inst->dataBufHBFX[i]; // Q0
-            }
-        } // end of H band gain computation
-        return 0;
-    }
-
-    // Update block index when we have something to process
-    inst->blockIndex++;
-    //
-
-    // Norm of magn
-    qMagn = inst->normData - inst->stages;
-
-    // Compute spectral flatness on input spectrum
-    WebRtcNsx_ComputeSpectralFlatness(inst, magnU16);
-
-    // quantile noise estimate
-    WebRtcNsx_NoiseEstimation(inst, magnU16, noiseU32, &qNoise);
-
-    //noise estimate from previous frame
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        prevNoiseU16[i] = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], 11); // Q(prevQNoise)
-    }
-
-    if (inst->blockIndex < END_STARTUP_SHORT)
-    {
-        // Noise Q-domain to be used later; see description at end of section.
-        q_domain_to_use = WEBRTC_SPL_MIN((int)qNoise, inst->minNorm - inst->stages);
-
-        // Calculate frequency independent parts in parametric noise estimate and calculate
-        // the estimate for the lower frequency band (same values for all frequency bins)
-        if (inst->pinkNoiseExp)
-        {
-            pink_noise_exp_avg = (WebRtc_Word16)WebRtcSpl_DivW32W16(inst->pinkNoiseExp,
-                                                (WebRtc_Word16)(inst->blockIndex + 1)); // Q14
-            pink_noise_num_avg = WebRtcSpl_DivW32W16(inst->pinkNoiseNumerator,
-                                 (WebRtc_Word16)(inst->blockIndex + 1)); // Q11
-            WebRtcNsx_CalcParametricNoiseEstimate(inst,
-                                                  pink_noise_exp_avg,
-                                                  pink_noise_num_avg,
-                                                  kStartBand,
-                                                  &noise_estimate,
-                                                  &noise_estimate_avg);
-        }
-        else
-        {
-            // Use white noise estimate if we have poor pink noise parameter estimates
-            noise_estimate = inst->whiteNoiseLevel; // Q(minNorm-stages)
-            noise_estimate_avg = noise_estimate / (inst->blockIndex + 1); // Q(minNorm-stages)
-        }
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            // Estimate the background noise using the pink noise parameters if permitted
-            if ((inst->pinkNoiseExp) && (i >= kStartBand))
-            {
-                // Reset noise_estimate
-                noise_estimate = 0;
-                noise_estimate_avg = 0;
-                // Calculate the parametric noise estimate for current frequency bin
-                WebRtcNsx_CalcParametricNoiseEstimate(inst,
-                                                      pink_noise_exp_avg,
-                                                      pink_noise_num_avg,
-                                                      i,
-                                                      &noise_estimate,
-                                                      &noise_estimate_avg);
-            }
-            // Calculate parametric Wiener filter
-            noiseSupFilterTmp[i] = inst->denoiseBound;
-            if (inst->initMagnEst[i])
-            {
-                // numerator = (initMagnEst - noise_estimate * overdrive)
-                // Result in Q(8+minNorm-stages)
-                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive);
-                numerator = WEBRTC_SPL_LSHIFT_U32(inst->initMagnEst[i], 8);
-                if (numerator > tmpU32no1)
-                {
-                    // Suppression filter coefficient larger than zero, so calculate.
-                    numerator -= tmpU32no1;
-
-                    // Determine number of left shifts in numerator for best accuracy after
-                    // division
-                    nShifts = WebRtcSpl_NormU32(numerator);
-                    nShifts = WEBRTC_SPL_SAT(6, nShifts, 0);
-
-                    // Shift numerator to Q(nShifts+8+minNorm-stages)
-                    numerator = WEBRTC_SPL_LSHIFT_U32(numerator, nShifts);
-
-                    // Shift denominator to Q(nShifts-6+minNorm-stages)
-                    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i], 6 - nShifts);
-                    tmpU32no2 = WEBRTC_SPL_UDIV(numerator, tmpU32no1); // Q14
-                    noiseSupFilterTmp[i] = (WebRtc_UWord16)WEBRTC_SPL_SAT(16384, tmpU32no2,
-                                           (WebRtc_UWord32)(inst->denoiseBound)); // Q14
-                }
-            }
-            // Weight quantile noise 'noiseU32' with modeled noise 'noise_estimate_avg'
-            // 'noiseU32 is in Q(qNoise) and 'noise_estimate' in Q(minNorm-stages)
-            // To guarantee that we do not get wrap around when shifting to the same domain
-            // we use the lowest one. Furthermore, we need to save 6 bits for the weighting.
-            // 'noise_estimate_avg' can handle this operation by construction, but 'noiseU32'
-            // may not.
-
-            // Shift 'noiseU32' to 'q_domain_to_use'
-            tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], (int)qNoise - q_domain_to_use);
-            // Shift 'noise_estimate_avg' to 'q_domain_to_use'
-            tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noise_estimate_avg, inst->minNorm - inst->stages
-                                              - q_domain_to_use);
-            // Make a simple check to see if we have enough room for weighting 'tmpU32no1'
-            // without wrap around
-            nShifts = 0;
-            if (tmpU32no1 & 0xfc000000) {
-                tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 6);
-                tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 6);
-                nShifts = 6;
-            }
-            // Add them together and divide by startup length
-            noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT);
-            // Shift back if necessary
-            noiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], nShifts);
-        }
-        // Update new Q-domain for 'noiseU32'
-        qNoise = q_domain_to_use;
-    }
-    // compute average signal during END_STARTUP_LONG time:
-    // used to normalize spectral difference measure
-    if (inst->blockIndex < END_STARTUP_LONG)
-    {
-        // substituting division with shift ending up in Q(-2*stages)
-        inst->timeAvgMagnEnergyTmp
-                += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy,
-                                         2 * inst->normData + inst->stages - 1);
-        inst->timeAvgMagnEnergy = WebRtcSpl_DivU32U16(inst->timeAvgMagnEnergyTmp,
-                                                      inst->blockIndex + 1);
-    }
-
-    //start processing at frames == converged+1
-    // STEP 1: compute prior and post SNR based on quantile noise estimates
-
-    // compute direct decision (DD) estimate of prior SNR: needed for new method
-    satMax = (WebRtc_UWord32)1048575;// Largest possible value without getting overflow despite shifting 12 steps
-    postShifts = 6 + qMagn - qNoise;
-    nShifts = 5 - inst->prevQMagn + inst->prevQNoise;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // FLOAT:
-        // post SNR
-        // postLocSnr[i] = 0.0;
-        // if (magn[i] > noise[i])
-        // {
-        //   postLocSnr[i] = magn[i] / (noise[i] + 0.0001);
-        // }
-        // // previous post SNR
-        // // previous estimate: based on previous frame with gain filter (smooth is previous filter)
-        //
-        // prevNearSnr[i] = inst->prevMagnU16[i] / (inst->noisePrev[i] + 0.0001) * (inst->smooth[i]);
-        //
-        // // DD estimate is sum of two terms: current estimate and previous estimate
-        // // directed decision update of priorSnr (or we actually store [2*priorSnr+1])
-        //
-        // priorLocSnr[i] = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * (postLocSnr[i] - 1.0);
-
-        // calculate post SNR: output in Q11
-        postLocSnr[i] = 2048; // 1.0 in Q11
-        tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], 6); // Q(6+qMagn)
-        if (postShifts < 0)
-        {
-            tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], -postShifts); // Q(6+qMagn)
-        } else
-        {
-            tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], postShifts); // Q(6+qMagn)
-        }
-        if (tmpU32no1 > tmpU32no2)
-        {
-            // Current magnitude larger than noise
-            tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, 11); // Q(17+qMagn)
-            if (tmpU32no2)
-            {
-                tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q11
-                postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
-            } else
-            {
-                postLocSnr[i] = satMax;
-            }
-        }
-
-        // calculate prevNearSnr[i] and save for later instead of recalculating it later
-        nearMagnEst = WEBRTC_SPL_UMUL_16_16(inst->prevMagnU16[i], inst->noiseSupFilter[i]); // Q(prevQMagn+14)
-        tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(nearMagnEst, 3); // Q(prevQMagn+17)
-        tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], nShifts); // Q(prevQMagn+6)
-
-        if (tmpU32no2)
-        {
-            tmpU32no1 = WEBRTC_SPL_DIV(tmpU32no1, tmpU32no2); // Q11
-            tmpU32no1 = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
-        } else
-        {
-            tmpU32no1 = satMax; // Q11
-        }
-        prevNearSnr[i] = tmpU32no1; // Q11
-
-        //directed decision update of priorSnr
-        tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
-        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(postLocSnr[i] - 2048, ONE_MINUS_DD_PR_SNR_Q11); // Q22
-        priorSnr = tmpU32no1 + tmpU32no2 + 512; // Q22 (added 512 for rounding)
-        // priorLocSnr = 1 + 2*priorSnr
-        priorLocSnr[i] = 2048 + WEBRTC_SPL_RSHIFT_U32(priorSnr, 10); // Q11
-    } // end of loop over frequencies
-    // done with step 1: DD computation of prior and post SNR
-
-    // STEP 2: compute speech/noise likelihood
-
-    //compute difference of input spectrum with learned/estimated noise spectrum
-    WebRtcNsx_ComputeSpectralDifference(inst, magnU16);
-    //compute histograms for determination of parameters (thresholds and weights for features)
-    //parameters are extracted once every window time (=inst->modelUpdate)
-    //counter update
-    inst->cntThresUpdate++;
-    flag = (int)(inst->cntThresUpdate == inst->modelUpdate);
-    //update histogram
-    WebRtcNsx_FeatureParameterExtraction(inst, flag);
-    //compute model parameters
-    if (flag)
-    {
-        inst->cntThresUpdate = 0; // Reset counter
-        //update every window:
-        // get normalization for spectral difference for next window estimate
-
-        // Shift to Q(-2*stages)
-        inst->curAvgMagnEnergy = WEBRTC_SPL_RSHIFT_U32(inst->curAvgMagnEnergy, STAT_UPDATES);
-
-        tmpU32no1 = (inst->curAvgMagnEnergy + inst->timeAvgMagnEnergy + 1) >> 1; //Q(-2*stages)
-        // Update featureSpecDiff
-        if ((tmpU32no1 != inst->timeAvgMagnEnergy) && (inst->featureSpecDiff))
-        {
-            norm32no1 = 0;
-            tmpU32no3 = tmpU32no1;
-            while (0xFFFF0000 & tmpU32no3)
-            {
-                tmpU32no3 >>= 1;
-                norm32no1++;
-            }
-            tmpU32no2 = inst->featureSpecDiff;
-            while (0xFFFF0000 & tmpU32no2)
-            {
-                tmpU32no2 >>= 1;
-                norm32no1++;
-            }
-            tmpU32no3 = WEBRTC_SPL_UMUL(tmpU32no3, tmpU32no2);
-            tmpU32no3 = WEBRTC_SPL_UDIV(tmpU32no3, inst->timeAvgMagnEnergy);
-            if (WebRtcSpl_NormU32(tmpU32no3) < norm32no1)
-            {
-                inst->featureSpecDiff = 0x007FFFFF;
-            } else
-            {
-                inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF,
-                                        WEBRTC_SPL_LSHIFT_U32(tmpU32no3, norm32no1));
-            }
-        }
-
-        inst->timeAvgMagnEnergy = tmpU32no1; // Q(-2*stages)
-        inst->curAvgMagnEnergy = 0;
-    }
-
-    //compute speech/noise probability
-    WebRtcNsx_SpeechNoiseProb(inst, nonSpeechProbFinal, priorLocSnr, postLocSnr);
-
-    //time-avg parameter for noise update
-    gammaNoise = NOISE_UPDATE_Q8; // Q8
-
-    maxNoiseU32 = 0;
-    postShifts = inst->prevQNoise - qMagn;
-    nShifts = inst->prevQMagn - qMagn;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // temporary noise update: use it for speech frames if update value is less than previous
-        // the formula has been rewritten into:
-        // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
-
-        if (postShifts < 0)
-        {
-            tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(magnU16[i], -postShifts); // Q(prevQNoise)
-        } else
-        {
-            tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], postShifts); // Q(prevQNoise)
-        }
-        if (prevNoiseU16[i] > tmpU32no2)
-        {
-            sign = -1;
-            tmpU32no1 = prevNoiseU16[i] - tmpU32no2;
-        } else
-        {
-            sign = 1;
-            tmpU32no1 = tmpU32no2 - prevNoiseU16[i];
-        }
-        noiseUpdateU32 = inst->prevNoiseU32[i]; // Q(prevQNoise+11)
-        tmpU32no3 = 0;
-        if ((tmpU32no1) && (nonSpeechProbFinal[i]))
-        {
-            // This value will be used later, if gammaNoise changes
-            tmpU32no3 = WEBRTC_SPL_UMUL_32_16(tmpU32no1, nonSpeechProbFinal[i]); // Q(prevQNoise+8)
-            if (0x7c000000 & tmpU32no3)
-            {
-                // Shifting required before multiplication
-                tmpU32no2
-                        = WEBRTC_SPL_UMUL_32_16(WEBRTC_SPL_RSHIFT_U32(tmpU32no3, 5), gammaNoise); // Q(prevQNoise+11)
-            } else
-            {
-                // We can do shifting after multiplication
-                tmpU32no2
-                        = WEBRTC_SPL_RSHIFT_U32(WEBRTC_SPL_UMUL_32_16(tmpU32no3, gammaNoise), 5); // Q(prevQNoise+11)
-            }
-            if (sign > 0)
-            {
-                noiseUpdateU32 += tmpU32no2; // Q(prevQNoise+11)
-            } else
-            {
-                // This operation is safe. We can never get wrap around, since worst
-                // case scenario means magnU16 = 0
-                noiseUpdateU32 -= tmpU32no2; // Q(prevQNoise+11)
-            }
-        }
-
-        //increase gamma (i.e., less noise update) for frame likely to be speech
-        prevGammaNoise = gammaNoise;
-        gammaNoise = NOISE_UPDATE_Q8;
-        //time-constant based on speech/noise state
-        //increase gamma (i.e., less noise update) for frames likely to be speech
-        if (nonSpeechProbFinal[i] < ONE_MINUS_PROB_RANGE_Q8)
-        {
-            gammaNoise = GAMMA_NOISE_TRANS_AND_SPEECH_Q8;
-        }
-
-        if (prevGammaNoise != gammaNoise)
-        {
-            // new noise update
-            // this line is the same as above, only that the result is stored in a different variable and the gammaNoise
-            // has changed
-            //
-            // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
-
-            if (0x7c000000 & tmpU32no3)
-            {
-                // Shifting required before multiplication
-                tmpU32no2
-                        = WEBRTC_SPL_UMUL_32_16(WEBRTC_SPL_RSHIFT_U32(tmpU32no3, 5), gammaNoise); // Q(prevQNoise+11)
-            } else
-            {
-                // We can do shifting after multiplication
-                tmpU32no2
-                        = WEBRTC_SPL_RSHIFT_U32(WEBRTC_SPL_UMUL_32_16(tmpU32no3, gammaNoise), 5); // Q(prevQNoise+11)
-            }
-            if (sign > 0)
-            {
-                tmpU32no1 = inst->prevNoiseU32[i] + tmpU32no2; // Q(prevQNoise+11)
-            } else
-            {
-                tmpU32no1 = inst->prevNoiseU32[i] - tmpU32no2; // Q(prevQNoise+11)
-            }
-            if (noiseUpdateU32 > tmpU32no1)
-            {
-                noiseUpdateU32 = tmpU32no1; // Q(prevQNoise+11)
-            }
-        }
-        noiseU32[i] = noiseUpdateU32; // Q(prevQNoise+11)
-        if (noiseUpdateU32 > maxNoiseU32)
-        {
-            maxNoiseU32 = noiseUpdateU32;
-        }
-
-        // conservative noise update
-        // // original FLOAT code
-        // if (prob_speech < PROB_RANGE) {
-        // inst->avgMagnPause[i] = inst->avgMagnPause[i] + (1.0 - gamma_pause)*(magn[i] - inst->avgMagnPause[i]);
-        // }
-
-        tmp32no2 = WEBRTC_SPL_SHIFT_W32(inst->avgMagnPause[i], -nShifts);
-        if (nonSpeechProbFinal[i] > ONE_MINUS_PROB_RANGE_Q8)
-        {
-            if (nShifts < 0)
-            {
-                tmp32no1 = (WebRtc_Word32)magnU16[i] - tmp32no2; // Q(qMagn)
-                tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts)
-                tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + 128, 8); // Q(qMagn)
-            } else
-            {
-                tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)magnU16[i], nShifts)
-                        - inst->avgMagnPause[i]; // Q(qMagn+nShifts)
-                tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts)
-                tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + (128 << nShifts), 8 + nShifts); // Q(qMagn)
-            }
-            tmp32no2 += tmp32no1; // Q(qMagn)
-        }
-        inst->avgMagnPause[i] = tmp32no2;
-    } // end of frequency loop
-
-    norm32no1 = WebRtcSpl_NormU32(maxNoiseU32);
-    qNoise = inst->prevQNoise + norm32no1 - 5;
-    // done with step 2: noise update
-
-    // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
-    nShifts = inst->prevQNoise + 11 - qMagn;
-    for (i = 0; i < inst->magnLen; i++)
-    {
-        // FLOAT code
-        // // post and prior SNR
-        // curNearSnr = 0.0;
-        // if (magn[i] > noise[i])
-        // {
-        // curNearSnr = magn[i] / (noise[i] + 0.0001) - 1.0;
-        // }
-        // // DD estimate is sum of two terms: current estimate and previous estimate
-        // // directed decision update of snrPrior
-        // snrPrior = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * curNearSnr;
-        // // gain filter
-        // tmpFloat1 = inst->overdrive + snrPrior;
-        // tmpFloat2 = snrPrior / tmpFloat1;
-        // theFilter[i] = tmpFloat2;
-
-        // calculate curNearSnr again, this is necessary because a new noise estimate has been made since then. for the original
-        curNearSnr = 0; // Q11
-        if (nShifts < 0)
-        {
-            // This case is equivalent with magn < noise which implies curNearSnr = 0;
-            tmpMagnU32 = (WebRtc_UWord32)magnU16[i]; // Q(qMagn)
-            tmpNoiseU32 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], -nShifts); // Q(qMagn)
-        } else if (nShifts > 17)
-        {
-            tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], 17); // Q(qMagn+17)
-            tmpNoiseU32 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], nShifts - 17); // Q(qMagn+17)
-        } else
-        {
-            tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], nShifts); // Q(qNoise_prev+11)
-            tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11)
-        }
-        if (tmpMagnU32 > tmpNoiseU32)
-        {
-            tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur)
-            norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1));
-            tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32no2); // Q(qCur+norm32no2)
-            tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpNoiseU32, 11 - norm32no2); // Q(qCur+norm32no2-11)
-            if (tmpU32no2)
-            {
-                tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q11
-            }
-            curNearSnr = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
-        }
-
-        //directed decision update of priorSnr
-        // FLOAT
-        // priorSnr = DD_PR_SNR * prevNearSnr + (1.0-DD_PR_SNR) * curNearSnr;
-
-        tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
-        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(curNearSnr, ONE_MINUS_DD_PR_SNR_Q11); // Q22
-        priorSnr = tmpU32no1 + tmpU32no2; // Q22
-
-        //gain filter
-        tmpU32no1 = (WebRtc_UWord32)(inst->overdrive)
-                + WEBRTC_SPL_RSHIFT_U32(priorSnr + 8192, 14); // Q8
-        tmpU16no1 = (WebRtc_UWord16)WEBRTC_SPL_UDIV(priorSnr + (tmpU32no1 >> 1), tmpU32no1); // Q14
-        inst->noiseSupFilter[i] = WEBRTC_SPL_SAT(16384, tmpU16no1, inst->denoiseBound); // 16384 = Q14(1.0) // Q14
-
-        // Weight in the parametric Wiener filter during startup
-        if (inst->blockIndex < END_STARTUP_SHORT)
-        {
-            // Weight the two suppression filters
-            tmpU32no1 = WEBRTC_SPL_UMUL_16_16(inst->noiseSupFilter[i],
-                                              (WebRtc_UWord16)inst->blockIndex);
-            tmpU32no2 = WEBRTC_SPL_UMUL_16_16(noiseSupFilterTmp[i],
-                                              (WebRtc_UWord16)(END_STARTUP_SHORT
-                                                      - inst->blockIndex));
-            tmpU32no1 += tmpU32no2;
-            inst->noiseSupFilter[i] = (WebRtc_UWord16)WebRtcSpl_DivU32U16(tmpU32no1,
-                                                                          END_STARTUP_SHORT);
-        }
-    } // end of loop over frequencies
-    //done with step3
-
-    // save noise and magnitude spectrum for next frame
-    inst->prevQNoise = qNoise;
-    inst->prevQMagn = qMagn;
-    if (norm32no1 > 5)
-    {
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            inst->prevNoiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], norm32no1 - 5); // Q(qNoise+11)
-            inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
-        }
-    } else
-    {
-        for (i = 0; i < inst->magnLen; i++)
-        {
-            inst->prevNoiseU32[i] = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], 5 - norm32no1); // Q(qNoise+11)
-            inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
-        }
-    }
-
-    WebRtcNsx_DataSynthesis(inst, outFrame);
-#ifdef NS_FILEDEBUG
-    fwrite(outframe, sizeof(short), inst->blockLen10ms, inst->outfile);
-#endif
-
-    //for H band:
-    // only update data buffer, then apply time-domain gain is applied derived from L band
-    if (inst->fs == 32000)
-    {
-        // update analysis buffer for H band
-        // append new data to buffer FX
-        WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX, inst->dataBufHBFX + inst->blockLen10ms, inst->anaLen - inst->blockLen10ms);
-        WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX + inst->anaLen - inst->blockLen10ms, speechFrameHB, inst->blockLen10ms);
-        // range for averaging low band quantities for H band gain
-
-        gainTimeDomainHB = 16384; // 16384 = Q14(1.0)
-        //average speech prob from low band
-        //average filter gain from low band
-        //avg over second half (i.e., 4->8kHz) of freq. spectrum
-        tmpU32no1 = 0; // Q12
-        tmpU16no1 = 0; // Q8
-        for (i = inst->anaLen2 - (inst->anaLen2 >> 2); i < inst->anaLen2; i++)
-        {
-            tmpU16no1 += nonSpeechProbFinal[i]; // Q8
-            tmpU32no1 += (WebRtc_UWord32)(inst->noiseSupFilter[i]); // Q14
-        }
-        avgProbSpeechHB = (WebRtc_Word16)(4096
-                - WEBRTC_SPL_RSHIFT_U16(tmpU16no1, inst->stages - 7)); // Q12
-        avgFilterGainHB = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, inst->stages - 3); // Q14
-
-        // // original FLOAT code
-        // // gain based on speech probability:
-        // avg_prob_speech_tt=(float)2.0*avg_prob_speech-(float)1.0;
-        // gain_mod=(float)0.5*((float)1.0+(float)tanh(avg_prob_speech_tt)); // between 0 and 1
-
-        // gain based on speech probability:
-        // original expression: "0.5 * (1 + tanh(2x-1))"
-        // avgProbSpeechHB has been anyway saturated to a value between 0 and 1 so the other cases don't have to be dealt with
-        // avgProbSpeechHB and gainModHB are in Q12, 3607 = Q12(0.880615234375) which is a zero point of
-        // |0.5 * (1 + tanh(2x-1)) - x| - |0.5 * (1 + tanh(2x-1)) - 0.880615234375| meaning that from that point the error of approximating
-        // the expression with f(x) = x would be greater than the error of approximating the expression with f(x) = 0.880615234375
-        // error: "|0.5 * (1 + tanh(2x-1)) - x| from x=0 to 0.880615234375" -> http://www.wolframalpha.com/input/?i=|0.5+*+(1+%2B+tanh(2x-1))+-+x|+from+x%3D0+to+0.880615234375
-        // and:  "|0.5 * (1 + tanh(2x-1)) - 0.880615234375| from x=0.880615234375 to 1" -> http://www.wolframalpha.com/input/?i=+|0.5+*+(1+%2B+tanh(2x-1))+-+0.880615234375|+from+x%3D0.880615234375+to+1
-        gainModHB = WEBRTC_SPL_MIN(avgProbSpeechHB, 3607);
-
-        // // original FLOAT code
-        // //combine gain with low band gain
-        // if (avg_prob_speech < (float)0.5) {
-        // gain_time_domain_HB=(float)0.5*gain_mod+(float)0.5*avg_filter_gain;
-        // }
-        // else {
-        // gain_time_domain_HB=(float)0.25*gain_mod+(float)0.75*avg_filter_gain;
-        // }
-
-
-        //combine gain with low band gain
-        if (avgProbSpeechHB < 2048)
-        { // 2048 = Q12(0.5)
-            // the next two lines in float are  "gain_time_domain = 0.5 * gain_mod + 0.5 * avg_filter_gain"; Q2(0.5) = 2 equals one left shift
-            gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14
-        } else
-        {
-            // "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;"
-            gainTimeDomainHB = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(3, avgFilterGainHB, 2); // 3 = Q2(0.75); Q14
-            gainTimeDomainHB += gainModHB; // Q14
-        }
-        //make sure gain is within flooring range
-        gainTimeDomainHB
-                = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (WebRtc_Word16)(inst->denoiseBound)); // 16384 = Q14(1.0)
-
-
-        //apply gain
-        for (i = 0; i < inst->blockLen10ms; i++)
-        {
-            outFrameHB[i]
-                    = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(gainTimeDomainHB, inst->dataBufHBFX[i], 14); // Q0
-        }
-    } // end of H band gain computation
-
-    return 0;
-}
diff --git a/src/modules/audio_processing/ns/main/source/nsx_core.h b/src/modules/audio_processing/ns/main/source/nsx_core.h
deleted file mode 100644
index 2e74303..0000000
--- a/src/modules/audio_processing/ns/main/source/nsx_core.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
-
-#include "typedefs.h"
-#include "signal_processing_library.h"
-
-#include "nsx_defines.h"
-
-#ifdef NS_FILEDEBUG
-#include <stdio.h>
-#endif
-
-typedef struct NsxInst_t_
-{
-    WebRtc_UWord32          fs;
-
-    const WebRtc_Word16*    window;
-    WebRtc_Word16           analysisBuffer[ANAL_BLOCKL_MAX];
-    WebRtc_Word16           synthesisBuffer[ANAL_BLOCKL_MAX];
-    WebRtc_UWord16          noiseSupFilter[HALF_ANAL_BLOCKL];
-    WebRtc_UWord16          overdrive; /* Q8 */
-    WebRtc_UWord16          denoiseBound; /* Q14 */
-    const WebRtc_Word16*    factor2Table;
-    WebRtc_Word16           noiseEstLogQuantile[SIMULT * HALF_ANAL_BLOCKL];
-    WebRtc_Word16           noiseEstDensity[SIMULT * HALF_ANAL_BLOCKL];
-    WebRtc_Word16           noiseEstCounter[SIMULT];
-    WebRtc_Word16           noiseEstQuantile[HALF_ANAL_BLOCKL];
-
-    WebRtc_Word16           anaLen;
-    int                     anaLen2;
-    int                     magnLen;
-    int                     aggrMode;
-    int                     stages;
-    int                     initFlag;
-    int                     gainMap;
-
-    WebRtc_Word32           maxLrt;
-    WebRtc_Word32           minLrt;
-    WebRtc_Word32           logLrtTimeAvgW32[HALF_ANAL_BLOCKL]; //log lrt factor with time-smoothing in Q8
-    WebRtc_Word32           featureLogLrt;
-    WebRtc_Word32           thresholdLogLrt;
-    WebRtc_Word16           weightLogLrt;
-
-    WebRtc_UWord32          featureSpecDiff;
-    WebRtc_UWord32          thresholdSpecDiff;
-    WebRtc_Word16           weightSpecDiff;
-
-    WebRtc_UWord32          featureSpecFlat;
-    WebRtc_UWord32          thresholdSpecFlat;
-    WebRtc_Word16           weightSpecFlat;
-
-    WebRtc_Word32           avgMagnPause[HALF_ANAL_BLOCKL]; //conservative estimate of noise spectrum
-    WebRtc_UWord32          magnEnergy;
-    WebRtc_UWord32          sumMagn;
-    WebRtc_UWord32          curAvgMagnEnergy;
-    WebRtc_UWord32          timeAvgMagnEnergy;
-    WebRtc_UWord32          timeAvgMagnEnergyTmp;
-
-    WebRtc_UWord32          whiteNoiseLevel;              //initial noise estimate
-    WebRtc_UWord32          initMagnEst[HALF_ANAL_BLOCKL];//initial magnitude spectrum estimate
-    WebRtc_Word32           pinkNoiseNumerator;           //pink noise parameter: numerator
-    WebRtc_Word32           pinkNoiseExp;                 //pink noise parameter: power of freq
-    int                     minNorm;                      //smallest normalization factor
-    int                     zeroInputSignal;              //zero input signal flag
-
-    WebRtc_UWord32          prevNoiseU32[HALF_ANAL_BLOCKL]; //noise spectrum from previous frame
-    WebRtc_UWord16          prevMagnU16[HALF_ANAL_BLOCKL]; //magnitude spectrum from previous frame
-    WebRtc_Word16           priorNonSpeechProb; //prior speech/noise probability // Q14
-
-    int                     blockIndex; //frame index counter
-    int                     modelUpdate; //parameter for updating or estimating thresholds/weights for prior model
-    int                     cntThresUpdate;
-
-    //histograms for parameter estimation
-    WebRtc_Word16           histLrt[HIST_PAR_EST];
-    WebRtc_Word16           histSpecFlat[HIST_PAR_EST];
-    WebRtc_Word16           histSpecDiff[HIST_PAR_EST];
-
-    //quantities for high band estimate
-    WebRtc_Word16           dataBufHBFX[ANAL_BLOCKL_MAX]; /* Q0 */
-
-    int                     qNoise;
-    int                     prevQNoise;
-    int                     prevQMagn;
-    int                     blockLen10ms;
-
-    WebRtc_Word16           real[ANAL_BLOCKL_MAX];
-    WebRtc_Word16           imag[ANAL_BLOCKL_MAX];
-    WebRtc_Word32           energyIn;
-    int                     scaleEnergyIn;
-    int                     normData;
-
-} NsxInst_t;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/****************************************************************************
- * WebRtcNsx_InitCore(...)
- *
- * This function initializes a noise suppression instance
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - fs            : Sampling frequency
- *
- * Output:
- *      - inst          : Initialized instance
- *
- * Return value         :  0 - Ok
- *                        -1 - Error
- */
-WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t *inst, WebRtc_UWord32 fs);
-
-/****************************************************************************
- * WebRtcNsx_set_policy_core(...)
- *
- * This changes the aggressiveness of the noise suppression method.
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - mode          : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
- *
- * Output:
- *      - NS_inst      : Initialized instance
- *
- * Return value         :  0 - Ok
- *                        -1 - Error
- */
-int WebRtcNsx_set_policy_core(NsxInst_t *inst, int mode);
-
-/****************************************************************************
- * WebRtcNsx_ProcessCore
- *
- * Do noise suppression.
- *
- * Input:
- *      - inst          : Instance that should be initialized
- *      - inFrameLow    : Input speech frame for lower band
- *      - inFrameHigh   : Input speech frame for higher band
- *
- * Output:
- *      - inst          : Updated instance
- *      - outFrameLow   : Output speech frame for lower band
- *      - outFrameHigh  : Output speech frame for higher band
- *
- * Return value         :  0 - OK
- *                        -1 - Error
- */
-int WebRtcNsx_ProcessCore(NsxInst_t *inst, short *inFrameLow, short *inFrameHigh,
-                          short *outFrameLow, short *outFrameHigh);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
diff --git a/src/modules/audio_processing/ns/main/source/windows_private.h b/src/modules/audio_processing/ns/main/source/windows_private.h
deleted file mode 100644
index 8f9006e..0000000
--- a/src/modules/audio_processing/ns/main/source/windows_private.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
-
-// Hanning window for 4ms 16kHz
-static const float kHanning64w128[128] = {
-0.00000000000000f, 0.02454122852291f, 0.04906767432742f,
-0.07356456359967f, 0.09801714032956f, 0.12241067519922f,
-0.14673047445536f, 0.17096188876030f, 0.19509032201613f,
-0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
-0.29028467725446f, 0.31368174039889f, 0.33688985339222f,
-0.35989503653499f, 0.38268343236509f, 0.40524131400499f,
-0.42755509343028f, 0.44961132965461f, 0.47139673682600f,
-0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
-0.55557023301960f, 0.57580819141785f, 0.59569930449243f,
-0.61523159058063f, 0.63439328416365f, 0.65317284295378f,
-0.67155895484702f, 0.68954054473707f, 0.70710678118655f,
-0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
-0.77301045336274f, 0.78834642762661f, 0.80320753148064f,
-0.81758481315158f, 0.83146961230255f, 0.84485356524971f,
-0.85772861000027f, 0.87008699110871f, 0.88192126434835f,
-0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
-0.92387953251129f, 0.93299279883474f, 0.94154406518302f,
-0.94952818059304f, 0.95694033573221f, 0.96377606579544f,
-0.97003125319454f, 0.97570213003853f, 0.98078528040323f,
-0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
-0.99518472667220f, 0.99729045667869f, 0.99879545620517f,
-0.99969881869620f, 1.00000000000000f,
-0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
-0.99518472667220f, 0.99247953459871f, 0.98917650996478f,
-0.98527764238894f, 0.98078528040323f, 0.97570213003853f,
-0.97003125319454f, 0.96377606579544f, 0.95694033573221f,
-0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
-0.92387953251129f, 0.91420975570353f, 0.90398929312344f,
-0.89322430119552f, 0.88192126434835f, 0.87008699110871f,
-0.85772861000027f, 0.84485356524971f, 0.83146961230255f,
-0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
-0.77301045336274f, 0.75720884650648f, 0.74095112535496f,
-0.72424708295147f, 0.70710678118655f, 0.68954054473707f,
-0.67155895484702f, 0.65317284295378f, 0.63439328416365f,
-0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
-0.55557023301960f, 0.53499761988710f, 0.51410274419322f,
-0.49289819222978f, 0.47139673682600f, 0.44961132965461f,
-0.42755509343028f, 0.40524131400499f, 0.38268343236509f,
-0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
-0.29028467725446f, 0.26671275747490f, 0.24298017990326f,
-0.21910124015687f, 0.19509032201613f, 0.17096188876030f,
-0.14673047445536f, 0.12241067519922f, 0.09801714032956f,
-0.07356456359967f, 0.04906767432742f, 0.02454122852291f
-};
-
-
-
-// hybrib Hanning & flat window
-static const float kBlocks80w128[128] = {
-(float)0.00000000, (float)0.03271908, (float)0.06540313, (float)0.09801714, (float)0.13052619,
-(float)0.16289547, (float)0.19509032, (float)0.22707626, (float)0.25881905, (float)0.29028468,
-(float)0.32143947, (float)0.35225005, (float)0.38268343, (float)0.41270703, (float)0.44228869,
-(float)0.47139674, (float)0.50000000, (float)0.52806785, (float)0.55557023, (float)0.58247770,
-(float)0.60876143, (float)0.63439328, (float)0.65934582, (float)0.68359230, (float)0.70710678,
-(float)0.72986407, (float)0.75183981, (float)0.77301045, (float)0.79335334, (float)0.81284668,
-(float)0.83146961, (float)0.84920218, (float)0.86602540, (float)0.88192126, (float)0.89687274,
-(float)0.91086382, (float)0.92387953, (float)0.93590593, (float)0.94693013, (float)0.95694034,
-(float)0.96592583, (float)0.97387698, (float)0.98078528, (float)0.98664333, (float)0.99144486,
-(float)0.99518473, (float)0.99785892, (float)0.99946459, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)0.99946459, (float)0.99785892, (float)0.99518473, (float)0.99144486,
-(float)0.98664333, (float)0.98078528, (float)0.97387698, (float)0.96592583, (float)0.95694034,
-(float)0.94693013, (float)0.93590593, (float)0.92387953, (float)0.91086382, (float)0.89687274,
-(float)0.88192126, (float)0.86602540, (float)0.84920218, (float)0.83146961, (float)0.81284668,
-(float)0.79335334, (float)0.77301045, (float)0.75183981, (float)0.72986407, (float)0.70710678,
-(float)0.68359230, (float)0.65934582, (float)0.63439328, (float)0.60876143, (float)0.58247770,
-(float)0.55557023, (float)0.52806785, (float)0.50000000, (float)0.47139674, (float)0.44228869,
-(float)0.41270703, (float)0.38268343, (float)0.35225005, (float)0.32143947, (float)0.29028468,
-(float)0.25881905, (float)0.22707626, (float)0.19509032, (float)0.16289547, (float)0.13052619,
-(float)0.09801714, (float)0.06540313, (float)0.03271908
-};
-
-// hybrib Hanning & flat window
-static const float kBlocks160w256[256] = {
-(float)0.00000000, (float)0.01636173, (float)0.03271908, (float)0.04906767, (float)0.06540313,
-(float)0.08172107, (float)0.09801714, (float)0.11428696, (float)0.13052619, (float)0.14673047,
-(float)0.16289547, (float)0.17901686, (float)0.19509032, (float)0.21111155, (float)0.22707626,
-(float)0.24298018, (float)0.25881905, (float)0.27458862, (float)0.29028468, (float)0.30590302,
-(float)0.32143947, (float)0.33688985, (float)0.35225005, (float)0.36751594, (float)0.38268343,
-(float)0.39774847, (float)0.41270703, (float)0.42755509, (float)0.44228869, (float)0.45690388,
-(float)0.47139674, (float)0.48576339, (float)0.50000000, (float)0.51410274, (float)0.52806785,
-(float)0.54189158, (float)0.55557023, (float)0.56910015, (float)0.58247770, (float)0.59569930,
-(float)0.60876143, (float)0.62166057, (float)0.63439328, (float)0.64695615, (float)0.65934582,
-(float)0.67155895, (float)0.68359230, (float)0.69544264, (float)0.70710678, (float)0.71858162,
-(float)0.72986407, (float)0.74095113, (float)0.75183981, (float)0.76252720, (float)0.77301045,
-(float)0.78328675, (float)0.79335334, (float)0.80320753, (float)0.81284668, (float)0.82226822,
-(float)0.83146961, (float)0.84044840, (float)0.84920218, (float)0.85772861, (float)0.86602540,
-(float)0.87409034, (float)0.88192126, (float)0.88951608, (float)0.89687274, (float)0.90398929,
-(float)0.91086382, (float)0.91749450, (float)0.92387953, (float)0.93001722, (float)0.93590593,
-(float)0.94154407, (float)0.94693013, (float)0.95206268, (float)0.95694034, (float)0.96156180,
-(float)0.96592583, (float)0.97003125, (float)0.97387698, (float)0.97746197, (float)0.98078528,
-(float)0.98384601, (float)0.98664333, (float)0.98917651, (float)0.99144486, (float)0.99344778,
-(float)0.99518473, (float)0.99665524, (float)0.99785892, (float)0.99879546, (float)0.99946459,
-(float)0.99986614, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)0.99986614, (float)0.99946459, (float)0.99879546, (float)0.99785892,
-(float)0.99665524, (float)0.99518473, (float)0.99344778, (float)0.99144486, (float)0.98917651,
-(float)0.98664333, (float)0.98384601, (float)0.98078528, (float)0.97746197, (float)0.97387698,
-(float)0.97003125, (float)0.96592583, (float)0.96156180, (float)0.95694034, (float)0.95206268,
-(float)0.94693013, (float)0.94154407, (float)0.93590593, (float)0.93001722, (float)0.92387953,
-(float)0.91749450, (float)0.91086382, (float)0.90398929, (float)0.89687274, (float)0.88951608,
-(float)0.88192126, (float)0.87409034, (float)0.86602540, (float)0.85772861, (float)0.84920218,
-(float)0.84044840, (float)0.83146961, (float)0.82226822, (float)0.81284668, (float)0.80320753,
-(float)0.79335334, (float)0.78328675, (float)0.77301045, (float)0.76252720, (float)0.75183981,
-(float)0.74095113, (float)0.72986407, (float)0.71858162, (float)0.70710678, (float)0.69544264,
-(float)0.68359230, (float)0.67155895, (float)0.65934582, (float)0.64695615, (float)0.63439328,
-(float)0.62166057, (float)0.60876143, (float)0.59569930, (float)0.58247770, (float)0.56910015,
-(float)0.55557023, (float)0.54189158, (float)0.52806785, (float)0.51410274, (float)0.50000000,
-(float)0.48576339, (float)0.47139674, (float)0.45690388, (float)0.44228869, (float)0.42755509,
-(float)0.41270703, (float)0.39774847, (float)0.38268343, (float)0.36751594, (float)0.35225005,
-(float)0.33688985, (float)0.32143947, (float)0.30590302, (float)0.29028468, (float)0.27458862,
-(float)0.25881905, (float)0.24298018, (float)0.22707626, (float)0.21111155, (float)0.19509032,
-(float)0.17901686, (float)0.16289547, (float)0.14673047, (float)0.13052619, (float)0.11428696,
-(float)0.09801714, (float)0.08172107, (float)0.06540313, (float)0.04906767, (float)0.03271908,
-(float)0.01636173
-};
-
-// hybrib Hanning & flat window: for 20ms
-static const float kBlocks320w512[512] = {
-(float)0.00000000, (float)0.00818114, (float)0.01636173, (float)0.02454123, (float)0.03271908,
-(float)0.04089475, (float)0.04906767, (float)0.05723732, (float)0.06540313, (float)0.07356456,
-(float)0.08172107, (float)0.08987211, (float)0.09801714, (float)0.10615561, (float)0.11428696,
-(float)0.12241068, (float)0.13052619, (float)0.13863297, (float)0.14673047, (float)0.15481816,
-(float)0.16289547, (float)0.17096189, (float)0.17901686, (float)0.18705985, (float)0.19509032,
-(float)0.20310773, (float)0.21111155, (float)0.21910124, (float)0.22707626, (float)0.23503609,
-(float)0.24298018, (float)0.25090801, (float)0.25881905, (float)0.26671276, (float)0.27458862,
-(float)0.28244610, (float)0.29028468, (float)0.29810383, (float)0.30590302, (float)0.31368174,
-(float)0.32143947, (float)0.32917568, (float)0.33688985, (float)0.34458148, (float)0.35225005,
-(float)0.35989504, (float)0.36751594, (float)0.37511224, (float)0.38268343, (float)0.39022901,
-(float)0.39774847, (float)0.40524131, (float)0.41270703, (float)0.42014512, (float)0.42755509,
-(float)0.43493645, (float)0.44228869, (float)0.44961133, (float)0.45690388, (float)0.46416584,
-(float)0.47139674, (float)0.47859608, (float)0.48576339, (float)0.49289819, (float)0.50000000,
-(float)0.50706834, (float)0.51410274, (float)0.52110274, (float)0.52806785, (float)0.53499762,
-(float)0.54189158, (float)0.54874927, (float)0.55557023, (float)0.56235401, (float)0.56910015,
-(float)0.57580819, (float)0.58247770, (float)0.58910822, (float)0.59569930, (float)0.60225052,
-(float)0.60876143, (float)0.61523159, (float)0.62166057, (float)0.62804795, (float)0.63439328,
-(float)0.64069616, (float)0.64695615, (float)0.65317284, (float)0.65934582, (float)0.66547466,
-(float)0.67155895, (float)0.67759830, (float)0.68359230, (float)0.68954054, (float)0.69544264,
-(float)0.70129818, (float)0.70710678, (float)0.71286806, (float)0.71858162, (float)0.72424708,
-(float)0.72986407, (float)0.73543221, (float)0.74095113, (float)0.74642045, (float)0.75183981,
-(float)0.75720885, (float)0.76252720, (float)0.76779452, (float)0.77301045, (float)0.77817464,
-(float)0.78328675, (float)0.78834643, (float)0.79335334, (float)0.79830715, (float)0.80320753,
-(float)0.80805415, (float)0.81284668, (float)0.81758481, (float)0.82226822, (float)0.82689659,
-(float)0.83146961, (float)0.83598698, (float)0.84044840, (float)0.84485357, (float)0.84920218,
-(float)0.85349396, (float)0.85772861, (float)0.86190585, (float)0.86602540, (float)0.87008699,
-(float)0.87409034, (float)0.87803519, (float)0.88192126, (float)0.88574831, (float)0.88951608,
-(float)0.89322430, (float)0.89687274, (float)0.90046115, (float)0.90398929, (float)0.90745693,
-(float)0.91086382, (float)0.91420976, (float)0.91749450, (float)0.92071783, (float)0.92387953,
-(float)0.92697940, (float)0.93001722, (float)0.93299280, (float)0.93590593, (float)0.93875641,
-(float)0.94154407, (float)0.94426870, (float)0.94693013, (float)0.94952818, (float)0.95206268,
-(float)0.95453345, (float)0.95694034, (float)0.95928317, (float)0.96156180, (float)0.96377607,
-(float)0.96592583, (float)0.96801094, (float)0.97003125, (float)0.97198664, (float)0.97387698,
-(float)0.97570213, (float)0.97746197, (float)0.97915640, (float)0.98078528, (float)0.98234852,
-(float)0.98384601, (float)0.98527764, (float)0.98664333, (float)0.98794298, (float)0.98917651,
-(float)0.99034383, (float)0.99144486, (float)0.99247953, (float)0.99344778, (float)0.99434953,
-(float)0.99518473, (float)0.99595331, (float)0.99665524, (float)0.99729046, (float)0.99785892,
-(float)0.99836060, (float)0.99879546, (float)0.99916346, (float)0.99946459, (float)0.99969882,
-(float)0.99986614, (float)0.99996653, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
-(float)1.00000000, (float)0.99996653, (float)0.99986614, (float)0.99969882, (float)0.99946459,
-(float)0.99916346, (float)0.99879546, (float)0.99836060, (float)0.99785892, (float)0.99729046,
-(float)0.99665524, (float)0.99595331, (float)0.99518473, (float)0.99434953, (float)0.99344778,
-(float)0.99247953, (float)0.99144486, (float)0.99034383, (float)0.98917651, (float)0.98794298,
-(float)0.98664333, (float)0.98527764, (float)0.98384601, (float)0.98234852, (float)0.98078528,
-(float)0.97915640, (float)0.97746197, (float)0.97570213, (float)0.97387698, (float)0.97198664,
-(float)0.97003125, (float)0.96801094, (float)0.96592583, (float)0.96377607, (float)0.96156180,
-(float)0.95928317, (float)0.95694034, (float)0.95453345, (float)0.95206268, (float)0.94952818,
-(float)0.94693013, (float)0.94426870, (float)0.94154407, (float)0.93875641, (float)0.93590593,
-(float)0.93299280, (float)0.93001722, (float)0.92697940, (float)0.92387953, (float)0.92071783,
-(float)0.91749450, (float)0.91420976, (float)0.91086382, (float)0.90745693, (float)0.90398929,
-(float)0.90046115, (float)0.89687274, (float)0.89322430, (float)0.88951608, (float)0.88574831,
-(float)0.88192126, (float)0.87803519, (float)0.87409034, (float)0.87008699, (float)0.86602540,
-(float)0.86190585, (float)0.85772861, (float)0.85349396, (float)0.84920218, (float)0.84485357,
-(float)0.84044840, (float)0.83598698, (float)0.83146961, (float)0.82689659, (float)0.82226822,
-(float)0.81758481, (float)0.81284668, (float)0.80805415, (float)0.80320753, (float)0.79830715,
-(float)0.79335334, (float)0.78834643, (float)0.78328675, (float)0.77817464, (float)0.77301045,
-(float)0.76779452, (float)0.76252720, (float)0.75720885, (float)0.75183981, (float)0.74642045,
-(float)0.74095113, (float)0.73543221, (float)0.72986407, (float)0.72424708, (float)0.71858162,
-(float)0.71286806, (float)0.70710678, (float)0.70129818, (float)0.69544264, (float)0.68954054,
-(float)0.68359230, (float)0.67759830, (float)0.67155895, (float)0.66547466, (float)0.65934582,
-(float)0.65317284, (float)0.64695615, (float)0.64069616, (float)0.63439328, (float)0.62804795,
-(float)0.62166057, (float)0.61523159, (float)0.60876143, (float)0.60225052, (float)0.59569930,
-(float)0.58910822, (float)0.58247770, (float)0.57580819, (float)0.56910015, (float)0.56235401,
-(float)0.55557023, (float)0.54874927, (float)0.54189158, (float)0.53499762, (float)0.52806785,
-(float)0.52110274, (float)0.51410274, (float)0.50706834, (float)0.50000000, (float)0.49289819,
-(float)0.48576339, (float)0.47859608, (float)0.47139674, (float)0.46416584, (float)0.45690388,
-(float)0.44961133, (float)0.44228869, (float)0.43493645, (float)0.42755509, (float)0.42014512,
-(float)0.41270703, (float)0.40524131, (float)0.39774847, (float)0.39022901, (float)0.38268343,
-(float)0.37511224, (float)0.36751594, (float)0.35989504, (float)0.35225005, (float)0.34458148,
-(float)0.33688985, (float)0.32917568, (float)0.32143947, (float)0.31368174, (float)0.30590302,
-(float)0.29810383, (float)0.29028468, (float)0.28244610, (float)0.27458862, (float)0.26671276,
-(float)0.25881905, (float)0.25090801, (float)0.24298018, (float)0.23503609, (float)0.22707626,
-(float)0.21910124, (float)0.21111155, (float)0.20310773, (float)0.19509032, (float)0.18705985,
-(float)0.17901686, (float)0.17096189, (float)0.16289547, (float)0.15481816, (float)0.14673047,
-(float)0.13863297, (float)0.13052619, (float)0.12241068, (float)0.11428696, (float)0.10615561,
-(float)0.09801714, (float)0.08987211, (float)0.08172107, (float)0.07356456, (float)0.06540313,
-(float)0.05723732, (float)0.04906767, (float)0.04089475, (float)0.03271908, (float)0.02454123,
-(float)0.01636173, (float)0.00818114
-};
-
-
-// Hanning window: for 15ms at 16kHz with symmetric zeros
-static const float kBlocks240w512[512] = {
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00654494, (float)0.01308960, (float)0.01963369,
-(float)0.02617695, (float)0.03271908, (float)0.03925982, (float)0.04579887, (float)0.05233596,
-(float)0.05887080, (float)0.06540313, (float)0.07193266, (float)0.07845910, (float)0.08498218,
-(float)0.09150162, (float)0.09801714, (float)0.10452846, (float)0.11103531, (float)0.11753740,
-(float)0.12403446, (float)0.13052620, (float)0.13701233, (float)0.14349262, (float)0.14996676,
-(float)0.15643448, (float)0.16289547, (float)0.16934951, (float)0.17579629, (float)0.18223552,
-(float)0.18866697, (float)0.19509032, (float)0.20150533, (float)0.20791170, (float)0.21430916,
-(float)0.22069745, (float)0.22707628, (float)0.23344538, (float)0.23980446, (float)0.24615330,
-(float)0.25249159, (float)0.25881904, (float)0.26513544, (float)0.27144045, (float)0.27773386,
-(float)0.28401536, (float)0.29028466, (float)0.29654160, (float)0.30278578, (float)0.30901700,
-(float)0.31523499, (float)0.32143945, (float)0.32763019, (float)0.33380687, (float)0.33996925,
-(float)0.34611708, (float)0.35225007, (float)0.35836795, (float)0.36447051, (float)0.37055743,
-(float)0.37662852, (float)0.38268346, (float)0.38872197, (float)0.39474389, (float)0.40074885,
-(float)0.40673664, (float)0.41270703, (float)0.41865975, (float)0.42459452, (float)0.43051112,
-(float)0.43640924, (float)0.44228873, (float)0.44814920, (float)0.45399052, (float)0.45981237,
-(float)0.46561453, (float)0.47139674, (float)0.47715878, (float)0.48290035, (float)0.48862126,
-(float)0.49432120, (float)0.50000000, (float)0.50565743, (float)0.51129311, (float)0.51690692,
-(float)0.52249855, (float)0.52806789, (float)0.53361452, (float)0.53913832, (float)0.54463905,
-(float)0.55011642, (float)0.55557024, (float)0.56100029, (float)0.56640625, (float)0.57178795,
-(float)0.57714522, (float)0.58247769, (float)0.58778524, (float)0.59306765, (float)0.59832460,
-(float)0.60355598, (float)0.60876143, (float)0.61394083, (float)0.61909395, (float)0.62422055,
-(float)0.62932038, (float)0.63439333, (float)0.63943899, (float)0.64445734, (float)0.64944810,
-(float)0.65441096, (float)0.65934587, (float)0.66425246, (float)0.66913062, (float)0.67398012,
-(float)0.67880076, (float)0.68359232, (float)0.68835455, (float)0.69308740, (float)0.69779050,
-(float)0.70246369, (float)0.70710677, (float)0.71171963, (float)0.71630198, (float)0.72085363,
-(float)0.72537440, (float)0.72986406, (float)0.73432255, (float)0.73874950, (float)0.74314487,
-(float)0.74750835, (float)0.75183982, (float)0.75613910, (float)0.76040596, (float)0.76464027,
-(float)0.76884186, (float)0.77301043, (float)0.77714598, (float)0.78124821, (float)0.78531694,
-(float)0.78935206, (float)0.79335338, (float)0.79732066, (float)0.80125386, (float)0.80515265,
-(float)0.80901700, (float)0.81284672, (float)0.81664157, (float)0.82040149, (float)0.82412618,
-(float)0.82781565, (float)0.83146966, (float)0.83508795, (float)0.83867061, (float)0.84221727,
-(float)0.84572780, (float)0.84920216, (float)0.85264021, (float)0.85604161, (float)0.85940641,
-(float)0.86273444, (float)0.86602545, (float)0.86927933, (float)0.87249607, (float)0.87567532,
-(float)0.87881714, (float)0.88192129, (float)0.88498765, (float)0.88801610, (float)0.89100653,
-(float)0.89395881, (float)0.89687276, (float)0.89974827, (float)0.90258533, (float)0.90538365,
-(float)0.90814316, (float)0.91086388, (float)0.91354549, (float)0.91618794, (float)0.91879123,
-(float)0.92135513, (float)0.92387950, (float)0.92636442, (float)0.92880958, (float)0.93121493,
-(float)0.93358046, (float)0.93590593, (float)0.93819135, (float)0.94043654, (float)0.94264150,
-(float)0.94480604, (float)0.94693011, (float)0.94901365, (float)0.95105654, (float)0.95305866,
-(float)0.95501995, (float)0.95694035, (float)0.95881975, (float)0.96065807, (float)0.96245527,
-(float)0.96421117, (float)0.96592581, (float)0.96759909, (float)0.96923089, (float)0.97082120,
-(float)0.97236991, (float)0.97387701, (float)0.97534233, (float)0.97676587, (float)0.97814763,
-(float)0.97948742, (float)0.98078531, (float)0.98204112, (float)0.98325491, (float)0.98442656,
-(float)0.98555607, (float)0.98664331, (float)0.98768836, (float)0.98869103, (float)0.98965138,
-(float)0.99056935, (float)0.99144489, (float)0.99227792, (float)0.99306846, (float)0.99381649,
-(float)0.99452192, (float)0.99518472, (float)0.99580491, (float)0.99638247, (float)0.99691731,
-(float)0.99740952, (float)0.99785894, (float)0.99826562, (float)0.99862951, (float)0.99895066,
-(float)0.99922901, (float)0.99946457, (float)0.99965733, (float)0.99980724, (float)0.99991435,
-(float)0.99997860, (float)1.00000000, (float)0.99997860, (float)0.99991435, (float)0.99980724,
-(float)0.99965733, (float)0.99946457, (float)0.99922901, (float)0.99895066, (float)0.99862951,
-(float)0.99826562, (float)0.99785894, (float)0.99740946, (float)0.99691731, (float)0.99638247,
-(float)0.99580491, (float)0.99518472, (float)0.99452192, (float)0.99381644, (float)0.99306846,
-(float)0.99227792, (float)0.99144489, (float)0.99056935, (float)0.98965138, (float)0.98869103,
-(float)0.98768836, (float)0.98664331, (float)0.98555607, (float)0.98442656, (float)0.98325491,
-(float)0.98204112, (float)0.98078525, (float)0.97948742, (float)0.97814757, (float)0.97676587,
-(float)0.97534227, (float)0.97387695, (float)0.97236991, (float)0.97082120, (float)0.96923089,
-(float)0.96759909, (float)0.96592581, (float)0.96421117, (float)0.96245521, (float)0.96065807,
-(float)0.95881969, (float)0.95694029, (float)0.95501995, (float)0.95305860, (float)0.95105648,
-(float)0.94901365, (float)0.94693011, (float)0.94480604, (float)0.94264150, (float)0.94043654,
-(float)0.93819129, (float)0.93590593, (float)0.93358046, (float)0.93121493, (float)0.92880952,
-(float)0.92636436, (float)0.92387950, (float)0.92135507, (float)0.91879123, (float)0.91618794,
-(float)0.91354543, (float)0.91086382, (float)0.90814310, (float)0.90538365, (float)0.90258527,
-(float)0.89974827, (float)0.89687276, (float)0.89395875, (float)0.89100647, (float)0.88801610,
-(float)0.88498759, (float)0.88192123, (float)0.87881714, (float)0.87567532, (float)0.87249595,
-(float)0.86927933, (float)0.86602539, (float)0.86273432, (float)0.85940641, (float)0.85604161,
-(float)0.85264009, (float)0.84920216, (float)0.84572780, (float)0.84221715, (float)0.83867055,
-(float)0.83508795, (float)0.83146954, (float)0.82781565, (float)0.82412612, (float)0.82040137,
-(float)0.81664157, (float)0.81284660, (float)0.80901700, (float)0.80515265, (float)0.80125374,
-(float)0.79732066, (float)0.79335332, (float)0.78935200, (float)0.78531694, (float)0.78124815,
-(float)0.77714586, (float)0.77301049, (float)0.76884180, (float)0.76464021, (float)0.76040596,
-(float)0.75613904, (float)0.75183970, (float)0.74750835, (float)0.74314481, (float)0.73874938,
-(float)0.73432249, (float)0.72986400, (float)0.72537428, (float)0.72085363, (float)0.71630186,
-(float)0.71171951, (float)0.70710677, (float)0.70246363, (float)0.69779032, (float)0.69308734,
-(float)0.68835449, (float)0.68359220, (float)0.67880070, (float)0.67398006, (float)0.66913044,
-(float)0.66425240, (float)0.65934575, (float)0.65441096, (float)0.64944804, (float)0.64445722,
-(float)0.63943905, (float)0.63439327, (float)0.62932026, (float)0.62422055, (float)0.61909389,
-(float)0.61394072, (float)0.60876143, (float)0.60355592, (float)0.59832448, (float)0.59306765,
-(float)0.58778518, (float)0.58247757, (float)0.57714522, (float)0.57178789, (float)0.56640613,
-(float)0.56100023, (float)0.55557019, (float)0.55011630, (float)0.54463905, (float)0.53913826,
-(float)0.53361434, (float)0.52806783, (float)0.52249849, (float)0.51690674, (float)0.51129305,
-(float)0.50565726, (float)0.50000006, (float)0.49432117, (float)0.48862115, (float)0.48290038,
-(float)0.47715873, (float)0.47139663, (float)0.46561456, (float)0.45981231, (float)0.45399037,
-(float)0.44814920, (float)0.44228864, (float)0.43640912, (float)0.43051112, (float)0.42459446,
-(float)0.41865960, (float)0.41270703, (float)0.40673658, (float)0.40074870, (float)0.39474386,
-(float)0.38872188, (float)0.38268328, (float)0.37662849, (float)0.37055734, (float)0.36447033,
-(float)0.35836792, (float)0.35224995, (float)0.34611690, (float)0.33996922, (float)0.33380675,
-(float)0.32763001, (float)0.32143945, (float)0.31523487, (float)0.30901679, (float)0.30278572,
-(float)0.29654145, (float)0.29028472, (float)0.28401530, (float)0.27773371, (float)0.27144048,
-(float)0.26513538, (float)0.25881892, (float)0.25249159, (float)0.24615324, (float)0.23980433,
-(float)0.23344538, (float)0.22707619, (float)0.22069728, (float)0.21430916, (float)0.20791161,
-(float)0.20150517, (float)0.19509031, (float)0.18866688, (float)0.18223536, (float)0.17579627,
-(float)0.16934940, (float)0.16289529, (float)0.15643445, (float)0.14996666, (float)0.14349243,
-(float)0.13701232, (float)0.13052608, (float)0.12403426, (float)0.11753736, (float)0.11103519,
-(float)0.10452849, (float)0.09801710, (float)0.09150149, (float)0.08498220, (float)0.07845904,
-(float)0.07193252, (float)0.06540315, (float)0.05887074, (float)0.05233581, (float)0.04579888,
-(float)0.03925974, (float)0.03271893, (float)0.02617695, (float)0.01963361, (float)0.01308943,
-(float)0.00654493, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000
-};
-
-
-// Hanning window: for 30ms with 1024 fft with symmetric zeros at 16kHz
-static const float kBlocks480w1024[1024] = {
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00327249, (float)0.00654494,
-(float)0.00981732, (float)0.01308960, (float)0.01636173, (float)0.01963369, (float)0.02290544,
-(float)0.02617695, (float)0.02944817, (float)0.03271908, (float)0.03598964, (float)0.03925982,
-(float)0.04252957, (float)0.04579887, (float)0.04906768, (float)0.05233596, (float)0.05560368,
-(float)0.05887080, (float)0.06213730, (float)0.06540313, (float)0.06866825, (float)0.07193266,
-(float)0.07519628, (float)0.07845910, (float)0.08172107, (float)0.08498218, (float)0.08824237,
-(float)0.09150162, (float)0.09475989, (float)0.09801714, (float)0.10127335, (float)0.10452846,
-(float)0.10778246, (float)0.11103531, (float)0.11428697, (float)0.11753740, (float)0.12078657,
-(float)0.12403446, (float)0.12728101, (float)0.13052620, (float)0.13376999, (float)0.13701233,
-(float)0.14025325, (float)0.14349262, (float)0.14673047, (float)0.14996676, (float)0.15320145,
-(float)0.15643448, (float)0.15966582, (float)0.16289547, (float)0.16612339, (float)0.16934951,
-(float)0.17257382, (float)0.17579629, (float)0.17901687, (float)0.18223552, (float)0.18545224,
-(float)0.18866697, (float)0.19187967, (float)0.19509032, (float)0.19829889, (float)0.20150533,
-(float)0.20470962, (float)0.20791170, (float)0.21111156, (float)0.21430916, (float)0.21750447,
-(float)0.22069745, (float)0.22388805, (float)0.22707628, (float)0.23026206, (float)0.23344538,
-(float)0.23662618, (float)0.23980446, (float)0.24298020, (float)0.24615330, (float)0.24932377,
-(float)0.25249159, (float)0.25565669, (float)0.25881904, (float)0.26197866, (float)0.26513544,
-(float)0.26828939, (float)0.27144045, (float)0.27458861, (float)0.27773386, (float)0.28087610,
-(float)0.28401536, (float)0.28715158, (float)0.29028466, (float)0.29341471, (float)0.29654160,
-(float)0.29966527, (float)0.30278578, (float)0.30590302, (float)0.30901700, (float)0.31212768,
-(float)0.31523499, (float)0.31833893, (float)0.32143945, (float)0.32453656, (float)0.32763019,
-(float)0.33072028, (float)0.33380687, (float)0.33688986, (float)0.33996925, (float)0.34304500,
-(float)0.34611708, (float)0.34918544, (float)0.35225007, (float)0.35531089, (float)0.35836795,
-(float)0.36142117, (float)0.36447051, (float)0.36751595, (float)0.37055743, (float)0.37359497,
-(float)0.37662852, (float)0.37965801, (float)0.38268346, (float)0.38570479, (float)0.38872197,
-(float)0.39173502, (float)0.39474389, (float)0.39774847, (float)0.40074885, (float)0.40374491,
-(float)0.40673664, (float)0.40972406, (float)0.41270703, (float)0.41568562, (float)0.41865975,
-(float)0.42162940, (float)0.42459452, (float)0.42755508, (float)0.43051112, (float)0.43346250,
-(float)0.43640924, (float)0.43935132, (float)0.44228873, (float)0.44522133, (float)0.44814920,
-(float)0.45107228, (float)0.45399052, (float)0.45690390, (float)0.45981237, (float)0.46271592,
-(float)0.46561453, (float)0.46850815, (float)0.47139674, (float)0.47428030, (float)0.47715878,
-(float)0.48003215, (float)0.48290035, (float)0.48576337, (float)0.48862126, (float)0.49147385,
-(float)0.49432120, (float)0.49716330, (float)0.50000000, (float)0.50283140, (float)0.50565743,
-(float)0.50847799, (float)0.51129311, (float)0.51410276, (float)0.51690692, (float)0.51970553,
-(float)0.52249855, (float)0.52528602, (float)0.52806789, (float)0.53084403, (float)0.53361452,
-(float)0.53637928, (float)0.53913832, (float)0.54189163, (float)0.54463905, (float)0.54738063,
-(float)0.55011642, (float)0.55284631, (float)0.55557024, (float)0.55828828, (float)0.56100029,
-(float)0.56370628, (float)0.56640625, (float)0.56910014, (float)0.57178795, (float)0.57446963,
-(float)0.57714522, (float)0.57981455, (float)0.58247769, (float)0.58513463, (float)0.58778524,
-(float)0.59042960, (float)0.59306765, (float)0.59569931, (float)0.59832460, (float)0.60094351,
-(float)0.60355598, (float)0.60616195, (float)0.60876143, (float)0.61135441, (float)0.61394083,
-(float)0.61652070, (float)0.61909395, (float)0.62166059, (float)0.62422055, (float)0.62677383,
-(float)0.62932038, (float)0.63186020, (float)0.63439333, (float)0.63691956, (float)0.63943899,
-(float)0.64195162, (float)0.64445734, (float)0.64695615, (float)0.64944810, (float)0.65193301,
-(float)0.65441096, (float)0.65688187, (float)0.65934587, (float)0.66180271, (float)0.66425246,
-(float)0.66669512, (float)0.66913062, (float)0.67155898, (float)0.67398012, (float)0.67639405,
-(float)0.67880076, (float)0.68120021, (float)0.68359232, (float)0.68597710, (float)0.68835455,
-(float)0.69072467, (float)0.69308740, (float)0.69544262, (float)0.69779050, (float)0.70013082,
-(float)0.70246369, (float)0.70478904, (float)0.70710677, (float)0.70941699, (float)0.71171963,
-(float)0.71401459, (float)0.71630198, (float)0.71858168, (float)0.72085363, (float)0.72311789,
-(float)0.72537440, (float)0.72762316, (float)0.72986406, (float)0.73209721, (float)0.73432255,
-(float)0.73653996, (float)0.73874950, (float)0.74095118, (float)0.74314487, (float)0.74533057,
-(float)0.74750835, (float)0.74967808, (float)0.75183982, (float)0.75399351, (float)0.75613910,
-(float)0.75827658, (float)0.76040596, (float)0.76252723, (float)0.76464027, (float)0.76674515,
-(float)0.76884186, (float)0.77093029, (float)0.77301043, (float)0.77508241, (float)0.77714598,
-(float)0.77920127, (float)0.78124821, (float)0.78328675, (float)0.78531694, (float)0.78733873,
-(float)0.78935206, (float)0.79135692, (float)0.79335338, (float)0.79534125, (float)0.79732066,
-(float)0.79929149, (float)0.80125386, (float)0.80320752, (float)0.80515265, (float)0.80708915,
-(float)0.80901700, (float)0.81093621, (float)0.81284672, (float)0.81474853, (float)0.81664157,
-(float)0.81852591, (float)0.82040149, (float)0.82226825, (float)0.82412618, (float)0.82597536,
-(float)0.82781565, (float)0.82964706, (float)0.83146966, (float)0.83328325, (float)0.83508795,
-(float)0.83688378, (float)0.83867061, (float)0.84044838, (float)0.84221727, (float)0.84397703,
-(float)0.84572780, (float)0.84746957, (float)0.84920216, (float)0.85092574, (float)0.85264021,
-(float)0.85434544, (float)0.85604161, (float)0.85772866, (float)0.85940641, (float)0.86107504,
-(float)0.86273444, (float)0.86438453, (float)0.86602545, (float)0.86765707, (float)0.86927933,
-(float)0.87089235, (float)0.87249607, (float)0.87409031, (float)0.87567532, (float)0.87725097,
-(float)0.87881714, (float)0.88037390, (float)0.88192129, (float)0.88345921, (float)0.88498765,
-(float)0.88650668, (float)0.88801610, (float)0.88951612, (float)0.89100653, (float)0.89248741,
-(float)0.89395881, (float)0.89542055, (float)0.89687276, (float)0.89831537, (float)0.89974827,
-(float)0.90117162, (float)0.90258533, (float)0.90398932, (float)0.90538365, (float)0.90676826,
-(float)0.90814316, (float)0.90950841, (float)0.91086388, (float)0.91220951, (float)0.91354549,
-(float)0.91487163, (float)0.91618794, (float)0.91749454, (float)0.91879123, (float)0.92007810,
-(float)0.92135513, (float)0.92262226, (float)0.92387950, (float)0.92512691, (float)0.92636442,
-(float)0.92759192, (float)0.92880958, (float)0.93001723, (float)0.93121493, (float)0.93240267,
-(float)0.93358046, (float)0.93474817, (float)0.93590593, (float)0.93705362, (float)0.93819135,
-(float)0.93931901, (float)0.94043654, (float)0.94154406, (float)0.94264150, (float)0.94372880,
-(float)0.94480604, (float)0.94587320, (float)0.94693011, (float)0.94797695, (float)0.94901365,
-(float)0.95004016, (float)0.95105654, (float)0.95206273, (float)0.95305866, (float)0.95404440,
-(float)0.95501995, (float)0.95598525, (float)0.95694035, (float)0.95788521, (float)0.95881975,
-(float)0.95974404, (float)0.96065807, (float)0.96156180, (float)0.96245527, (float)0.96333838,
-(float)0.96421117, (float)0.96507370, (float)0.96592581, (float)0.96676767, (float)0.96759909,
-(float)0.96842021, (float)0.96923089, (float)0.97003126, (float)0.97082120, (float)0.97160077,
-(float)0.97236991, (float)0.97312868, (float)0.97387701, (float)0.97461486, (float)0.97534233,
-(float)0.97605932, (float)0.97676587, (float)0.97746199, (float)0.97814763, (float)0.97882277,
-(float)0.97948742, (float)0.98014158, (float)0.98078531, (float)0.98141843, (float)0.98204112,
-(float)0.98265332, (float)0.98325491, (float)0.98384601, (float)0.98442656, (float)0.98499662,
-(float)0.98555607, (float)0.98610497, (float)0.98664331, (float)0.98717111, (float)0.98768836,
-(float)0.98819500, (float)0.98869103, (float)0.98917651, (float)0.98965138, (float)0.99011570,
-(float)0.99056935, (float)0.99101239, (float)0.99144489, (float)0.99186671, (float)0.99227792,
-(float)0.99267852, (float)0.99306846, (float)0.99344778, (float)0.99381649, (float)0.99417448,
-(float)0.99452192, (float)0.99485862, (float)0.99518472, (float)0.99550015, (float)0.99580491,
-(float)0.99609905, (float)0.99638247, (float)0.99665523, (float)0.99691731, (float)0.99716878,
-(float)0.99740952, (float)0.99763954, (float)0.99785894, (float)0.99806762, (float)0.99826562,
-(float)0.99845290, (float)0.99862951, (float)0.99879545, (float)0.99895066, (float)0.99909520,
-(float)0.99922901, (float)0.99935216, (float)0.99946457, (float)0.99956632, (float)0.99965733,
-(float)0.99973762, (float)0.99980724, (float)0.99986613, (float)0.99991435, (float)0.99995178,
-(float)0.99997860, (float)0.99999464, (float)1.00000000, (float)0.99999464, (float)0.99997860,
-(float)0.99995178, (float)0.99991435, (float)0.99986613, (float)0.99980724, (float)0.99973762,
-(float)0.99965733, (float)0.99956632, (float)0.99946457, (float)0.99935216, (float)0.99922901,
-(float)0.99909520, (float)0.99895066, (float)0.99879545, (float)0.99862951, (float)0.99845290,
-(float)0.99826562, (float)0.99806762, (float)0.99785894, (float)0.99763954, (float)0.99740946,
-(float)0.99716872, (float)0.99691731, (float)0.99665523, (float)0.99638247, (float)0.99609905,
-(float)0.99580491, (float)0.99550015, (float)0.99518472, (float)0.99485862, (float)0.99452192,
-(float)0.99417448, (float)0.99381644, (float)0.99344778, (float)0.99306846, (float)0.99267852,
-(float)0.99227792, (float)0.99186671, (float)0.99144489, (float)0.99101239, (float)0.99056935,
-(float)0.99011564, (float)0.98965138, (float)0.98917651, (float)0.98869103, (float)0.98819494,
-(float)0.98768836, (float)0.98717111, (float)0.98664331, (float)0.98610497, (float)0.98555607,
-(float)0.98499656, (float)0.98442656, (float)0.98384601, (float)0.98325491, (float)0.98265326,
-(float)0.98204112, (float)0.98141843, (float)0.98078525, (float)0.98014158, (float)0.97948742,
-(float)0.97882277, (float)0.97814757, (float)0.97746193, (float)0.97676587, (float)0.97605932,
-(float)0.97534227, (float)0.97461486, (float)0.97387695, (float)0.97312862, (float)0.97236991,
-(float)0.97160077, (float)0.97082120, (float)0.97003126, (float)0.96923089, (float)0.96842015,
-(float)0.96759909, (float)0.96676761, (float)0.96592581, (float)0.96507365, (float)0.96421117,
-(float)0.96333838, (float)0.96245521, (float)0.96156180, (float)0.96065807, (float)0.95974404,
-(float)0.95881969, (float)0.95788515, (float)0.95694029, (float)0.95598525, (float)0.95501995,
-(float)0.95404440, (float)0.95305860, (float)0.95206267, (float)0.95105648, (float)0.95004016,
-(float)0.94901365, (float)0.94797695, (float)0.94693011, (float)0.94587314, (float)0.94480604,
-(float)0.94372880, (float)0.94264150, (float)0.94154406, (float)0.94043654, (float)0.93931895,
-(float)0.93819129, (float)0.93705362, (float)0.93590593, (float)0.93474817, (float)0.93358046,
-(float)0.93240267, (float)0.93121493, (float)0.93001723, (float)0.92880952, (float)0.92759192,
-(float)0.92636436, (float)0.92512691, (float)0.92387950, (float)0.92262226, (float)0.92135507,
-(float)0.92007804, (float)0.91879123, (float)0.91749448, (float)0.91618794, (float)0.91487157,
-(float)0.91354543, (float)0.91220951, (float)0.91086382, (float)0.90950835, (float)0.90814310,
-(float)0.90676820, (float)0.90538365, (float)0.90398932, (float)0.90258527, (float)0.90117157,
-(float)0.89974827, (float)0.89831525, (float)0.89687276, (float)0.89542055, (float)0.89395875,
-(float)0.89248741, (float)0.89100647, (float)0.88951600, (float)0.88801610, (float)0.88650662,
-(float)0.88498759, (float)0.88345915, (float)0.88192123, (float)0.88037384, (float)0.87881714,
-(float)0.87725091, (float)0.87567532, (float)0.87409031, (float)0.87249595, (float)0.87089223,
-(float)0.86927933, (float)0.86765701, (float)0.86602539, (float)0.86438447, (float)0.86273432,
-(float)0.86107504, (float)0.85940641, (float)0.85772860, (float)0.85604161, (float)0.85434544,
-(float)0.85264009, (float)0.85092574, (float)0.84920216, (float)0.84746951, (float)0.84572780,
-(float)0.84397697, (float)0.84221715, (float)0.84044844, (float)0.83867055, (float)0.83688372,
-(float)0.83508795, (float)0.83328319, (float)0.83146954, (float)0.82964706, (float)0.82781565,
-(float)0.82597530, (float)0.82412612, (float)0.82226813, (float)0.82040137, (float)0.81852591,
-(float)0.81664157, (float)0.81474847, (float)0.81284660, (float)0.81093609, (float)0.80901700,
-(float)0.80708915, (float)0.80515265, (float)0.80320752, (float)0.80125374, (float)0.79929143,
-(float)0.79732066, (float)0.79534125, (float)0.79335332, (float)0.79135686, (float)0.78935200,
-(float)0.78733861, (float)0.78531694, (float)0.78328675, (float)0.78124815, (float)0.77920121,
-(float)0.77714586, (float)0.77508223, (float)0.77301049, (float)0.77093029, (float)0.76884180,
-(float)0.76674509, (float)0.76464021, (float)0.76252711, (float)0.76040596, (float)0.75827658,
-(float)0.75613904, (float)0.75399339, (float)0.75183970, (float)0.74967796, (float)0.74750835,
-(float)0.74533057, (float)0.74314481, (float)0.74095106, (float)0.73874938, (float)0.73653996,
-(float)0.73432249, (float)0.73209721, (float)0.72986400, (float)0.72762305, (float)0.72537428,
-(float)0.72311789, (float)0.72085363, (float)0.71858162, (float)0.71630186, (float)0.71401453,
-(float)0.71171951, (float)0.70941705, (float)0.70710677, (float)0.70478898, (float)0.70246363,
-(float)0.70013070, (float)0.69779032, (float)0.69544268, (float)0.69308734, (float)0.69072461,
-(float)0.68835449, (float)0.68597704, (float)0.68359220, (float)0.68120021, (float)0.67880070,
-(float)0.67639399, (float)0.67398006, (float)0.67155886, (float)0.66913044, (float)0.66669512,
-(float)0.66425240, (float)0.66180259, (float)0.65934575, (float)0.65688181, (float)0.65441096,
-(float)0.65193301, (float)0.64944804, (float)0.64695609, (float)0.64445722, (float)0.64195150,
-(float)0.63943905, (float)0.63691956, (float)0.63439327, (float)0.63186014, (float)0.62932026,
-(float)0.62677372, (float)0.62422055, (float)0.62166059, (float)0.61909389, (float)0.61652064,
-(float)0.61394072, (float)0.61135429, (float)0.60876143, (float)0.60616189, (float)0.60355592,
-(float)0.60094339, (float)0.59832448, (float)0.59569913, (float)0.59306765, (float)0.59042960,
-(float)0.58778518, (float)0.58513451, (float)0.58247757, (float)0.57981461, (float)0.57714522,
-(float)0.57446963, (float)0.57178789, (float)0.56910002, (float)0.56640613, (float)0.56370628,
-(float)0.56100023, (float)0.55828822, (float)0.55557019, (float)0.55284619, (float)0.55011630,
-(float)0.54738069, (float)0.54463905, (float)0.54189152, (float)0.53913826, (float)0.53637916,
-(float)0.53361434, (float)0.53084403, (float)0.52806783, (float)0.52528596, (float)0.52249849,
-(float)0.51970541, (float)0.51690674, (float)0.51410276, (float)0.51129305, (float)0.50847787,
-(float)0.50565726, (float)0.50283122, (float)0.50000006, (float)0.49716327, (float)0.49432117,
-(float)0.49147379, (float)0.48862115, (float)0.48576325, (float)0.48290038, (float)0.48003212,
-(float)0.47715873, (float)0.47428021, (float)0.47139663, (float)0.46850798, (float)0.46561456,
-(float)0.46271589, (float)0.45981231, (float)0.45690379, (float)0.45399037, (float)0.45107210,
-(float)0.44814920, (float)0.44522130, (float)0.44228864, (float)0.43935123, (float)0.43640912,
-(float)0.43346232, (float)0.43051112, (float)0.42755505, (float)0.42459446, (float)0.42162928,
-(float)0.41865960, (float)0.41568545, (float)0.41270703, (float)0.40972400, (float)0.40673658,
-(float)0.40374479, (float)0.40074870, (float)0.39774850, (float)0.39474386, (float)0.39173496,
-(float)0.38872188, (float)0.38570464, (float)0.38268328, (float)0.37965804, (float)0.37662849,
-(float)0.37359491, (float)0.37055734, (float)0.36751580, (float)0.36447033, (float)0.36142117,
-(float)0.35836792, (float)0.35531086, (float)0.35224995, (float)0.34918529, (float)0.34611690,
-(float)0.34304500, (float)0.33996922, (float)0.33688980, (float)0.33380675, (float)0.33072016,
-(float)0.32763001, (float)0.32453656, (float)0.32143945, (float)0.31833887, (float)0.31523487,
-(float)0.31212750, (float)0.30901679, (float)0.30590302, (float)0.30278572, (float)0.29966521,
-(float)0.29654145, (float)0.29341453, (float)0.29028472, (float)0.28715155, (float)0.28401530,
-(float)0.28087601, (float)0.27773371, (float)0.27458847, (float)0.27144048, (float)0.26828936,
-(float)0.26513538, (float)0.26197854, (float)0.25881892, (float)0.25565651, (float)0.25249159,
-(float)0.24932374, (float)0.24615324, (float)0.24298008, (float)0.23980433, (float)0.23662600,
-(float)0.23344538, (float)0.23026201, (float)0.22707619, (float)0.22388794, (float)0.22069728,
-(float)0.21750426, (float)0.21430916, (float)0.21111152, (float)0.20791161, (float)0.20470949,
-(float)0.20150517, (float)0.19829892, (float)0.19509031, (float)0.19187963, (float)0.18866688,
-(float)0.18545210, (float)0.18223536, (float)0.17901689, (float)0.17579627, (float)0.17257376,
-(float)0.16934940, (float)0.16612324, (float)0.16289529, (float)0.15966584, (float)0.15643445,
-(float)0.15320137, (float)0.14996666, (float)0.14673033, (float)0.14349243, (float)0.14025325,
-(float)0.13701232, (float)0.13376991, (float)0.13052608, (float)0.12728085, (float)0.12403426,
-(float)0.12078657, (float)0.11753736, (float)0.11428688, (float)0.11103519, (float)0.10778230,
-(float)0.10452849, (float)0.10127334, (float)0.09801710, (float)0.09475980, (float)0.09150149,
-(float)0.08824220, (float)0.08498220, (float)0.08172106, (float)0.07845904, (float)0.07519618,
-(float)0.07193252, (float)0.06866808, (float)0.06540315, (float)0.06213728, (float)0.05887074,
-(float)0.05560357, (float)0.05233581, (float)0.04906749, (float)0.04579888, (float)0.04252954,
-(float)0.03925974, (float)0.03598953, (float)0.03271893, (float)0.02944798, (float)0.02617695,
-(float)0.02290541, (float)0.01963361, (float)0.01636161, (float)0.01308943, (float)0.00981712,
-(float)0.00654493, (float)0.00327244, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
-(float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000};
-
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
diff --git a/src/modules/audio_processing/ns/noise_suppression.c b/src/modules/audio_processing/ns/noise_suppression.c
new file mode 100644
index 0000000..d33caa9
--- /dev/null
+++ b/src/modules/audio_processing/ns/noise_suppression.c
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "noise_suppression.h"
+#include "ns_core.h"
+#include "defines.h"
+
// Copies the NS version string into |versionStr|.
// Returns 0 on success, -1 if |versionStr| is NULL or |length| is too small
// to hold the string including its terminating null character.
int WebRtcNs_get_version(char* versionStr, short length) {
  const char version[] = "NS 2.2.0";
  // Account for the terminating null character.
  const short versionLen = (short)strlen(version) + 1;

  if (versionStr == NULL || versionLen > length) {
    return -1;
  }
  strncpy(versionStr, version, versionLen);
  return 0;
}
+
// Allocates a floating-point noise suppression instance and stores it in
// |NS_inst|.
// Returns 0 on success, -1 on a NULL handle pointer or allocation failure.
int WebRtcNs_Create(NsHandle** NS_inst) {
  if (NS_inst == NULL) {
    // Guard against a NULL handle pointer; the original dereferenced it
    // unconditionally.
    return -1;
  }
  *NS_inst = (NsHandle*) malloc(sizeof(NSinst_t));
  if (*NS_inst == NULL) {
    return -1;
  }
  // Mark the instance uninitialized; WebRtcNs_Init() must be called before
  // processing.
  (*(NSinst_t**)NS_inst)->initFlag = 0;
  return 0;
}
+
// Releases the memory owned by a noise suppression instance.
// A NULL handle is tolerated (free(NULL) is a no-op). Always returns 0.
int WebRtcNs_Free(NsHandle* NS_inst) {
  free((void*) NS_inst);
  return 0;
}
+
+
+int WebRtcNs_Init(NsHandle* NS_inst, WebRtc_UWord32 fs) {
+  return WebRtcNs_InitCore((NSinst_t*) NS_inst, fs);
+}
+
+int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
+  return WebRtcNs_set_policy_core((NSinst_t*) NS_inst, mode);
+}
+
+
+int WebRtcNs_Process(NsHandle* NS_inst, short* spframe, short* spframe_H,
+                     short* outframe, short* outframe_H) {
+  return WebRtcNs_ProcessCore(
+      (NSinst_t*) NS_inst, spframe, spframe_H, outframe, outframe_H);
+}
diff --git a/src/modules/audio_processing/ns/noise_suppression_x.c b/src/modules/audio_processing/ns/noise_suppression_x.c
new file mode 100644
index 0000000..afdea7b
--- /dev/null
+++ b/src/modules/audio_processing/ns/noise_suppression_x.c
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "noise_suppression_x.h"
+#include "nsx_core.h"
+#include "nsx_defines.h"
+
// Copies the fixed-point NS version string into |versionStr|.
// Returns 0 on success, -1 if |versionStr| is NULL or |length| is too small
// to hold the string including its terminating null character.
int WebRtcNsx_get_version(char* versionStr, short length) {
  const char version[] = "NS\t3.1.0";
  // Account for the terminating null character.
  const short versionLen = (short)strlen(version) + 1;

  if (versionStr == NULL || versionLen > length) {
    return -1;
  }
  strncpy(versionStr, version, versionLen);
  return 0;
}
+
// Allocates a fixed-point noise suppression instance and stores it in
// |nsxInst|.
// Returns 0 on success, -1 on a NULL handle pointer or allocation failure.
int WebRtcNsx_Create(NsxHandle** nsxInst) {
  if (nsxInst == NULL) {
    // Guard against a NULL handle pointer; the original dereferenced it
    // unconditionally.
    return -1;
  }
  *nsxInst = (NsxHandle*)malloc(sizeof(NsxInst_t));
  if (*nsxInst == NULL) {
    return -1;
  }
  // Mark the instance uninitialized; WebRtcNsx_Init() must be called before
  // processing.
  (*(NsxInst_t**)nsxInst)->initFlag = 0;
  return 0;
}
+
// Releases the memory owned by a fixed-point NS instance.
// A NULL handle is tolerated (free(NULL) is a no-op). Always returns 0.
int WebRtcNsx_Free(NsxHandle* nsxInst) {
  free((void*)nsxInst);
  return 0;
}
+
+int WebRtcNsx_Init(NsxHandle* nsxInst, WebRtc_UWord32 fs) {
+  return WebRtcNsx_InitCore((NsxInst_t*)nsxInst, fs);
+}
+
+int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode) {
+  return WebRtcNsx_set_policy_core((NsxInst_t*)nsxInst, mode);
+}
+
+int WebRtcNsx_Process(NsxHandle* nsxInst, short* speechFrame,
+                      short* speechFrameHB, short* outFrame,
+                      short* outFrameHB) {
+  return WebRtcNsx_ProcessCore(
+      (NsxInst_t*)nsxInst, speechFrame, speechFrameHB, outFrame, outFrameHB);
+}
+
diff --git a/src/modules/audio_processing/ns/main/source/ns.gyp b/src/modules/audio_processing/ns/ns.gypi
similarity index 65%
rename from src/modules/audio_processing/ns/main/source/ns.gyp
rename to src/modules/audio_processing/ns/ns.gypi
index c8488b2..3e3d2e1 100644
--- a/src/modules/audio_processing/ns/main/source/ns.gyp
+++ b/src/modules/audio_processing/ns/ns.gypi
@@ -7,27 +7,24 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../../../common_settings.gypi',
-  ],
   'targets': [
     {
       'target_name': 'ns',
       'type': '<(library)',
       'dependencies': [
-        '../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
-        '../../../utility/util.gyp:apm_util'
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        'apm_util'
       ],
       'include_dirs': [
-        '../interface',
+        'interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'interface',
         ],
       },
       'sources': [
-        '../interface/noise_suppression.h',
+        'interface/noise_suppression.h',
         'noise_suppression.c',
         'windows_private.h',
         'defines.h',
@@ -39,18 +36,19 @@
       'target_name': 'ns_fix',
       'type': '<(library)',
       'dependencies': [
-        '../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
       ],
       'include_dirs': [
-        '../interface',
+        'interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../interface',
+          'interface',
         ],
       },
       'sources': [
-        '../interface/noise_suppression_x.h',
+        'interface/noise_suppression_x.h',
         'noise_suppression_x.c',
         'nsx_defines.h',
         'nsx_core.c',
@@ -59,9 +57,3 @@
     },
   ],
 }
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/ns/ns_core.c b/src/modules/audio_processing/ns/ns_core.c
new file mode 100644
index 0000000..e80f699
--- /dev/null
+++ b/src/modules/audio_processing/ns/ns_core.c
@@ -0,0 +1,1305 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include <math.h>
+//#include <stdio.h>
+#include <stdlib.h>
+#include "noise_suppression.h"
+#include "ns_core.h"
+#include "windows_private.h"
+#include "fft4g.h"
+#include "signal_processing_library.h"
+
+// Set Feature Extraction Parameters
+void WebRtcNs_set_feature_extraction_parameters(NSinst_t* inst) {
+  //bin size of histogram
+  inst->featureExtractionParams.binSizeLrt      = (float)0.1;
+  inst->featureExtractionParams.binSizeSpecFlat = (float)0.05;
+  inst->featureExtractionParams.binSizeSpecDiff = (float)0.1;
+
+  //range of histogram over which lrt threshold is computed
+  inst->featureExtractionParams.rangeAvgHistLrt = (float)1.0;
+
+  //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
+  // thresholds for prior model
+  inst->featureExtractionParams.factor1ModelPars = (float)1.20; //for lrt and spectral diff
+  inst->featureExtractionParams.factor2ModelPars = (float)0.9;  //for spectral_flatness:
+  // used when noise is flatter than speech
+
+  //peak limit for spectral flatness (varies between 0 and 1)
+  inst->featureExtractionParams.thresPosSpecFlat = (float)0.6;
+
+  //limit on spacing of two highest peaks in histogram: spacing determined by bin size
+  inst->featureExtractionParams.limitPeakSpacingSpecFlat = 
+      2 * inst->featureExtractionParams.binSizeSpecFlat;
+  inst->featureExtractionParams.limitPeakSpacingSpecDiff =
+      2 * inst->featureExtractionParams.binSizeSpecDiff;
+
+  //limit on relevance of second peak:
+  inst->featureExtractionParams.limitPeakWeightsSpecFlat = (float)0.5;
+  inst->featureExtractionParams.limitPeakWeightsSpecDiff = (float)0.5;
+
+  // fluctuation limit of lrt feature
+  inst->featureExtractionParams.thresFluctLrt = (float)0.05;
+
+  //limit on the max and min values for the feature thresholds
+  inst->featureExtractionParams.maxLrt = (float)1.0;
+  inst->featureExtractionParams.minLrt = (float)0.20;
+
+  inst->featureExtractionParams.maxSpecFlat = (float)0.95;
+  inst->featureExtractionParams.minSpecFlat = (float)0.10;
+
+  inst->featureExtractionParams.maxSpecDiff = (float)1.0;
+  inst->featureExtractionParams.minSpecDiff = (float)0.16;
+
+  //criteria of weight of histogram peak  to accept/reject feature
+  inst->featureExtractionParams.thresWeightSpecFlat = (int)(0.3
+      * (inst->modelUpdatePars[1])); //for spectral flatness
+  inst->featureExtractionParams.thresWeightSpecDiff = (int)(0.3
+      * (inst->modelUpdatePars[1])); //for spectral difference
+}
+
+// Initialize state
// Initializes an NS instance for sample rate |fs| (8000, 16000 or 32000 Hz;
// only 10 ms frames are supported).
// Returns 0 on success, -1 on a NULL instance or unsupported rate.
int WebRtcNs_InitCore(NSinst_t* inst, WebRtc_UWord32 fs) {
  int i;

  // Check for valid pointer.
  if (inst == NULL) {
    return -1;
  }
  // Only the three supported rates are accepted.
  if (fs != 8000 && fs != 16000 && fs != 32000) {
    return -1;
  }
  inst->fs = fs;

  inst->windShift = 0;
  inst->outLen = 0;
  if (fs == 8000) {
    inst->blockLen = 80;
    inst->blockLen10ms = 80;
    inst->anaLen = 128;
    inst->window = kBlocks80w128;
  } else {
    // 16 kHz and 32 kHz use the same analysis setup; at 32 kHz only the
    // lower band goes through the spectral analysis.
    inst->blockLen = 160;
    inst->blockLen10ms = 160;
    inst->anaLen = 256;
    inst->window = kBlocks160w256;
  }
  inst->magnLen = inst->anaLen / 2 + 1;  // Number of frequency bins.

  // Initialize FFT work arrays; ip[0] == 0 triggers table setup in rdft.
  inst->ip[0] = 0;
  memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
  WebRtc_rdft(inst->anaLen, 1, inst->dataBuf, inst->ip, inst->wfft);

  memset(inst->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
  memset(inst->syntBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);

  // High-band processing buffer.
  memset(inst->dataBufHB, 0, sizeof(float) * ANAL_BLOCKL_MAX);

  // Quantile noise estimation state.
  memset(inst->quantile, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
    inst->lquantile[i] = 8.0f;
    inst->density[i] = 0.3f;
  }
  // Stagger the counters so the simultaneous estimates wrap at different
  // times.
  for (i = 0; i < SIMULT; i++) {
    inst->counter[i] =
        (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
  }
  inst->updates = 0;

  // Wiener filter starts fully open (gain 1).
  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
    inst->smooth[i] = 1.0f;
  }

  // Default aggressiveness.
  inst->aggrMode = 0;

  // Speech/noise model state.
  inst->priorSpeechProb = 0.5f;  // Prior probability of speech.
  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
    inst->magnPrev[i] = 0.0f;                  // Previous magnitude spectrum.
    inst->noisePrev[i] = 0.0f;                 // Previous noise spectrum.
    inst->logLrtTimeAvg[i] = LRT_FEATURE_THR;  // Smoothed LR ratio (at threshold).
    inst->magnAvgPause[i] = 0.0f;              // Conservative noise estimate.
    inst->speechProbHB[i] = 0.0f;              // High-band speech probability.
    inst->initMagnEst[i] = 0.0f;               // Initial average magnitude.
  }

  // Feature quantities.
  inst->featureData[0] = SF_FEATURE_THR;   // Spectral flatness (at threshold).
  inst->featureData[1] = 0.0f;             // Spectral entropy (unused).
  inst->featureData[2] = 0.0f;             // Spectral variance (unused).
  inst->featureData[3] = LRT_FEATURE_THR;  // Average LRT factor (at threshold).
  inst->featureData[4] = SF_FEATURE_THR;   // Spectral template diff (at threshold).
  inst->featureData[5] = 0.0f;             // Spectral-diff normalization.
  inst->featureData[6] = 0.0f;             // Windowed average input magnitude.

  // Histograms used to learn/update the feature thresholds.
  for (i = 0; i < HIST_PAR_EST; i++) {
    inst->histLrt[i] = 0;
    inst->histSpecFlat[i] = 0;
    inst->histSpecDiff[i] = 0;
  }

  inst->blockInd = -1;  // Frame counter.
  inst->priorModelPars[0] = LRT_FEATURE_THR;  // LRT threshold.
  inst->priorModelPars[1] = 0.5f;  // Flatness threshold (learned on-line).
  inst->priorModelPars[2] = 1.0f;  // Sign map for the flatness measure.
  inst->priorModelPars[3] = 0.5f;  // Template-diff threshold (learned on-line).
  inst->priorModelPars[4] = 1.0f;  // Weight for the LRT feature.
  inst->priorModelPars[5] = 0.0f;  // Weight for spectral flatness.
  inst->priorModelPars[6] = 0.0f;  // Weight for spectral difference.

  inst->modelUpdatePars[0] = 2;    // 0: no update, 1: update once, 2: every window.
  inst->modelUpdatePars[1] = 500;  // Update window length.
  inst->modelUpdatePars[2] = 0;    // Conservative-noise update counter.
  // Countdown until the feature thresholds are updated.
  inst->modelUpdatePars[3] = inst->modelUpdatePars[1];

  inst->signalEnergy = 0.0;
  inst->sumMagn = 0.0;
  inst->whiteNoiseLevel = 0.0;
  inst->pinkNoiseNumerator = 0.0;
  inst->pinkNoiseExp = 0.0;

  // Set the feature configuration.
  WebRtcNs_set_feature_extraction_parameters(inst);

  // Default policy.
  WebRtcNs_set_policy_core(inst, 0);

  memset(inst->outBuf, 0, sizeof(float) * 3 * BLOCKL_MAX);

  inst->initFlag = 1;
  return 0;
}
+
+int WebRtcNs_set_policy_core(NSinst_t* inst, int mode) {
+  // allow for modes:0,1,2,3
+  if (mode < 0 || mode > 3) {
+    return (-1);
+  }
+
+  inst->aggrMode = mode;
+  if (mode == 0) {
+    inst->overdrive = (float)1.0;
+    inst->denoiseBound = (float)0.5;
+    inst->gainmap = 0;
+  } else if (mode == 1) {
+    //inst->overdrive = (float)1.25;
+    inst->overdrive = (float)1.0;
+    inst->denoiseBound = (float)0.25;
+    inst->gainmap = 1;
+  } else if (mode == 2) {
+    //inst->overdrive = (float)1.25;
+    inst->overdrive = (float)1.1;
+    inst->denoiseBound = (float)0.125;
+    inst->gainmap = 1;
+  } else if (mode == 3) {
+    //inst->overdrive = (float)1.30;
+    inst->overdrive = (float)1.25;
+    inst->denoiseBound = (float)0.09;
+    inst->gainmap = 1;
+  }
+  return 0;
+}
+
+// Estimate noise
+void WebRtcNs_NoiseEstimation(NSinst_t* inst, float* magn, float* noise) {
+  int i, s, offset;
+  float lmagn[HALF_ANAL_BLOCKL], delta;
+
+  if (inst->updates < END_STARTUP_LONG) {
+    inst->updates++;
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    lmagn[i] = (float)log(magn[i]);
+  }
+
+  // loop over simultaneous estimates
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * inst->magnLen;
+
+    // newquantest(...)
+    for (i = 0; i < inst->magnLen; i++) {
+      // compute delta
+      if (inst->density[offset + i] > 1.0) {
+        delta = FACTOR * (float)1.0 / inst->density[offset + i];
+      } else {
+        delta = FACTOR;
+      }
+
+      // update log quantile estimate
+      if (lmagn[i] > inst->lquantile[offset + i]) {
+        inst->lquantile[offset + i] += QUANTILE * delta
+                                       / (float)(inst->counter[s] + 1);
+      } else {
+        inst->lquantile[offset + i] -= ((float)1.0 - QUANTILE) * delta
+                                       / (float)(inst->counter[s] + 1);
+      }
+
+      // update density estimate
+      if (fabs(lmagn[i] - inst->lquantile[offset + i]) < WIDTH) {
+        inst->density[offset + i] = ((float)inst->counter[s] * inst->density[offset
+            + i] + (float)1.0 / ((float)2.0 * WIDTH)) / (float)(inst->counter[s] + 1);
+      }
+    } // end loop over magnitude spectrum
+
+    if (inst->counter[s] >= END_STARTUP_LONG) {
+      inst->counter[s] = 0;
+      if (inst->updates >= END_STARTUP_LONG) {
+        for (i = 0; i < inst->magnLen; i++) {
+          inst->quantile[i] = (float)exp(inst->lquantile[offset + i]);
+        }
+      }
+    }
+
+    inst->counter[s]++;
+  } // end loop over simultaneous estimates
+
+  // Sequentially update the noise during startup
+  if (inst->updates < END_STARTUP_LONG) {
+    // Use the last "s" to get noise during startup that differ from zero.
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->quantile[i] = (float)exp(inst->lquantile[offset + i]);
+    }
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    noise[i] = inst->quantile[i];
+  }
+}
+
+// Extract thresholds for feature parameters
+// histograms are computed over some window_size (given by inst->modelUpdatePars[1])
+// thresholds and weights are extracted every window
+// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
+// threshold and weights are returned in: inst->priorModelPars
// Extract thresholds for feature parameters.
// Histograms are computed over a window of inst->modelUpdatePars[1] frames;
// thresholds and weights are extracted once per window.
// flag 0: update the histograms only (one sample per feature per call).
// flag 1: compute the thresholds/weights from the accumulated histograms.
// Thresholds and weights are returned in inst->priorModelPars.
void WebRtcNs_FeatureParameterExtraction(NSinst_t* inst, int flag) {
  int i, useFeatureSpecFlat, useFeatureSpecDiff, numHistLrt;
  int maxPeak1, maxPeak2;
  int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff, weightPeak2SpecDiff;

  float binMid, featureSum;
  float posPeak1SpecFlat, posPeak2SpecFlat, posPeak1SpecDiff, posPeak2SpecDiff;
  float fluctLrt, avgHistLrt, avgSquareHistLrt, avgHistLrtCompl;

  //3 features: lrt, flatness, difference
  //lrt_feature = inst->featureData[3];
  //flat_feature = inst->featureData[0];
  //diff_feature = inst->featureData[4];

  //update histograms (ignore out-of-range feature values)
  if (flag == 0) {
    // LRT
    if ((inst->featureData[3] < HIST_PAR_EST * inst->featureExtractionParams.binSizeLrt)
        && (inst->featureData[3] >= 0.0)) {
      i = (int)(inst->featureData[3] / inst->featureExtractionParams.binSizeLrt);
      inst->histLrt[i]++;
    }
    // Spectral flatness
    if ((inst->featureData[0] < HIST_PAR_EST
         * inst->featureExtractionParams.binSizeSpecFlat)
        && (inst->featureData[0] >= 0.0)) {
      i = (int)(inst->featureData[0] / inst->featureExtractionParams.binSizeSpecFlat);
      inst->histSpecFlat[i]++;
    }
    // Spectral difference
    if ((inst->featureData[4] < HIST_PAR_EST
         * inst->featureExtractionParams.binSizeSpecDiff)
        && (inst->featureData[4] >= 0.0)) {
      i = (int)(inst->featureData[4] / inst->featureExtractionParams.binSizeSpecDiff);
      inst->histSpecDiff[i]++;
    }
  }

  // extract parameters for speech/noise probability
  if (flag == 1) {
    //lrt feature: compute the average over inst->featureExtractionParams.rangeAvgHistLrt
    avgHistLrt = 0.0;
    avgHistLrtCompl = 0.0;
    avgSquareHistLrt = 0.0;
    numHistLrt = 0;
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeLrt;
      if (binMid <= inst->featureExtractionParams.rangeAvgHistLrt) {
        avgHistLrt += inst->histLrt[i] * binMid;
        numHistLrt += inst->histLrt[i];
      }
      avgSquareHistLrt += inst->histLrt[i] * binMid * binMid;
      avgHistLrtCompl += inst->histLrt[i] * binMid;
    }
    if (numHistLrt > 0) {
      avgHistLrt = avgHistLrt / ((float)numHistLrt);
    }
    avgHistLrtCompl = avgHistLrtCompl / ((float)inst->modelUpdatePars[1]);
    avgSquareHistLrt = avgSquareHistLrt / ((float)inst->modelUpdatePars[1]);
    // fluctuation measure of the lrt feature over the window
    fluctLrt = avgSquareHistLrt - avgHistLrt * avgHistLrtCompl;
    // get threshold for lrt feature:
    if (fluctLrt < inst->featureExtractionParams.thresFluctLrt) {
      //very low fluct, so likely noise
      inst->priorModelPars[0] = inst->featureExtractionParams.maxLrt;
    } else {
      inst->priorModelPars[0] = inst->featureExtractionParams.factor1ModelPars
                                * avgHistLrt;
      // check if value is within min/max range
      if (inst->priorModelPars[0] < inst->featureExtractionParams.minLrt) {
        inst->priorModelPars[0] = inst->featureExtractionParams.minLrt;
      }
      if (inst->priorModelPars[0] > inst->featureExtractionParams.maxLrt) {
        inst->priorModelPars[0] = inst->featureExtractionParams.maxLrt;
      }
    }
    // done with lrt feature

    //
    // for spectral flatness and spectral difference: compute the main peaks of histogram
    maxPeak1 = 0;
    maxPeak2 = 0;
    posPeak1SpecFlat = 0.0;
    posPeak2SpecFlat = 0.0;
    weightPeak1SpecFlat = 0;
    weightPeak2SpecFlat = 0;

    // peaks for flatness (track the two largest histogram bins)
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecFlat;
      if (inst->histSpecFlat[i] > maxPeak1) {
        // Found new "first" peak
        maxPeak2 = maxPeak1;
        weightPeak2SpecFlat = weightPeak1SpecFlat;
        posPeak2SpecFlat = posPeak1SpecFlat;

        maxPeak1 = inst->histSpecFlat[i];
        weightPeak1SpecFlat = inst->histSpecFlat[i];
        posPeak1SpecFlat = binMid;
      } else if (inst->histSpecFlat[i] > maxPeak2) {
        // Found new "second" peak
        maxPeak2 = inst->histSpecFlat[i];
        weightPeak2SpecFlat = inst->histSpecFlat[i];
        posPeak2SpecFlat = binMid;
      }
    }

    //compute two peaks for spectral difference
    maxPeak1 = 0;
    maxPeak2 = 0;
    posPeak1SpecDiff = 0.0;
    posPeak2SpecDiff = 0.0;
    weightPeak1SpecDiff = 0;
    weightPeak2SpecDiff = 0;
    // peaks for spectral difference
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid = ((float)i + (float)0.5) * inst->featureExtractionParams.binSizeSpecDiff;
      if (inst->histSpecDiff[i] > maxPeak1) {
        // Found new "first" peak
        maxPeak2 = maxPeak1;
        weightPeak2SpecDiff = weightPeak1SpecDiff;
        posPeak2SpecDiff = posPeak1SpecDiff;

        maxPeak1 = inst->histSpecDiff[i];
        weightPeak1SpecDiff = inst->histSpecDiff[i];
        posPeak1SpecDiff = binMid;
      } else if (inst->histSpecDiff[i] > maxPeak2) {
        // Found new "second" peak
        maxPeak2 = inst->histSpecDiff[i];
        weightPeak2SpecDiff = inst->histSpecDiff[i];
        posPeak2SpecDiff = binMid;
      }
    }

    // for spectrum flatness feature
    useFeatureSpecFlat = 1;
    // merge the two peaks if they are close
    if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat)
         < inst->featureExtractionParams.limitPeakSpacingSpecFlat)
        && (weightPeak2SpecFlat
            > inst->featureExtractionParams.limitPeakWeightsSpecFlat
            * weightPeak1SpecFlat)) {
      weightPeak1SpecFlat += weightPeak2SpecFlat;
      posPeak1SpecFlat = (float)0.5 * (posPeak1SpecFlat + posPeak2SpecFlat);
    }
    //reject if weight of peaks is not large enough, or peak value too small
    if (weightPeak1SpecFlat < inst->featureExtractionParams.thresWeightSpecFlat
        || posPeak1SpecFlat < inst->featureExtractionParams.thresPosSpecFlat) {
      useFeatureSpecFlat = 0;
    }
    // if selected, get the threshold
    if (useFeatureSpecFlat == 1) {
      // compute the threshold
      inst->priorModelPars[1] = inst->featureExtractionParams.factor2ModelPars
                                * posPeak1SpecFlat;
      //check if value is within min/max range
      if (inst->priorModelPars[1] < inst->featureExtractionParams.minSpecFlat) {
        inst->priorModelPars[1] = inst->featureExtractionParams.minSpecFlat;
      }
      if (inst->priorModelPars[1] > inst->featureExtractionParams.maxSpecFlat) {
        inst->priorModelPars[1] = inst->featureExtractionParams.maxSpecFlat;
      }
    }
    // done with flatness feature

    // for template feature
    useFeatureSpecDiff = 1;
    // merge the two peaks if they are close
    if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff)
         < inst->featureExtractionParams.limitPeakSpacingSpecDiff)
        && (weightPeak2SpecDiff
            > inst->featureExtractionParams.limitPeakWeightsSpecDiff
            * weightPeak1SpecDiff)) {
      weightPeak1SpecDiff += weightPeak2SpecDiff;
      posPeak1SpecDiff = (float)0.5 * (posPeak1SpecDiff + posPeak2SpecDiff);
    }
    // get the threshold value
    inst->priorModelPars[3] = inst->featureExtractionParams.factor1ModelPars
                              * posPeak1SpecDiff;
    //reject if weight of peaks is not large enough
    if (weightPeak1SpecDiff < inst->featureExtractionParams.thresWeightSpecDiff) {
      useFeatureSpecDiff = 0;
    }
    //check if value is within min/max range
    if (inst->priorModelPars[3] < inst->featureExtractionParams.minSpecDiff) {
      inst->priorModelPars[3] = inst->featureExtractionParams.minSpecDiff;
    }
    if (inst->priorModelPars[3] > inst->featureExtractionParams.maxSpecDiff) {
      inst->priorModelPars[3] = inst->featureExtractionParams.maxSpecDiff;
    }
    // done with spectral difference feature

    // don't use template feature if fluctuation of lrt feature is very low:
    //  most likely just noise state
    if (fluctLrt < inst->featureExtractionParams.thresFluctLrt) {
      useFeatureSpecDiff = 0;
    }

    // select the weights between the features
    // inst->priorModelPars[4] is weight for lrt: always selected
    // inst->priorModelPars[5] is weight for spectral flatness
    // inst->priorModelPars[6] is weight for spectral difference
    featureSum = (float)(1 + useFeatureSpecFlat + useFeatureSpecDiff);
    inst->priorModelPars[4] = (float)1.0 / featureSum;
    inst->priorModelPars[5] = ((float)useFeatureSpecFlat) / featureSum;
    inst->priorModelPars[6] = ((float)useFeatureSpecDiff) / featureSum;

    // set hists to zero for next update
    if (inst->modelUpdatePars[0] >= 1) {
      for (i = 0; i < HIST_PAR_EST; i++) {
        inst->histLrt[i] = 0;
        inst->histSpecFlat[i] = 0;
        inst->histSpecDiff[i] = 0;
      }
    }
  } // end of flag == 1
}
+
+// Compute spectral flatness on input spectrum
+// magnIn is the magnitude spectrum
+// spectral flatness is returned in inst->featureData[0]
+void WebRtcNs_ComputeSpectralFlatness(NSinst_t* inst, float* magnIn) {
+  int i;
+  int shiftLP = 1; //option to remove first bin(s) from spectral measures
+  float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
+
+  // comute spectral measures
+  // for flatness
+  avgSpectralFlatnessNum = 0.0;
+  avgSpectralFlatnessDen = inst->sumMagn;
+  for (i = 0; i < shiftLP; i++) {
+    avgSpectralFlatnessDen -= magnIn[i];
+  }
+  // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
+  for (i = shiftLP; i < inst->magnLen; i++) {
+    if (magnIn[i] > 0.0) {
+      avgSpectralFlatnessNum += (float)log(magnIn[i]);
+    } else {
+      inst->featureData[0] -= SPECT_FL_TAVG * inst->featureData[0];
+      return;
+    }
+  }
+  //normalize
+  avgSpectralFlatnessDen = avgSpectralFlatnessDen / inst->magnLen;
+  avgSpectralFlatnessNum = avgSpectralFlatnessNum / inst->magnLen;
+
+  //ratio and inverse log: check for case of log(0)
+  spectralTmp = (float)exp(avgSpectralFlatnessNum) / avgSpectralFlatnessDen;
+
+  //time-avg update of spectral flatness feature
+  inst->featureData[0] += SPECT_FL_TAVG * (spectralTmp - inst->featureData[0]);
+  // done with flatness feature
+}
+
+// Compute the difference measure between input spectrum and a template/learned noise spectrum
+// magnIn is the input spectrum
+// the reference/template spectrum is inst->magnAvgPause[i]
+// returns (normalized) spectral difference in inst->featureData[4]
+void WebRtcNs_ComputeSpectralDifference(NSinst_t* inst, float* magnIn) {
+  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
+  int i;
+  float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
+
+  avgPause = 0.0;
+  avgMagn = inst->sumMagn;
+  // compute average quantities
+  for (i = 0; i < inst->magnLen; i++) {
+    //conservative smooth noise spectrum from pause frames
+    avgPause += inst->magnAvgPause[i];
+  }
+  avgPause = avgPause / ((float)inst->magnLen);
+  avgMagn = avgMagn / ((float)inst->magnLen);
+
+  covMagnPause = 0.0;
+  varPause = 0.0;
+  varMagn = 0.0;
+  // compute variance and covariance quantities
+  for (i = 0; i < inst->magnLen; i++) {
+    covMagnPause += (magnIn[i] - avgMagn) * (inst->magnAvgPause[i] - avgPause);
+    varPause += (inst->magnAvgPause[i] - avgPause) * (inst->magnAvgPause[i] - avgPause);
+    varMagn += (magnIn[i] - avgMagn) * (magnIn[i] - avgMagn);
+  }
+  covMagnPause = covMagnPause / ((float)inst->magnLen);
+  varPause = varPause / ((float)inst->magnLen);
+  varMagn = varMagn / ((float)inst->magnLen);
+  // update of average magnitude spectrum
+  inst->featureData[6] += inst->signalEnergy;
+
+  avgDiffNormMagn = varMagn - (covMagnPause * covMagnPause) / (varPause + (float)0.0001);
+  // normalize and compute time-avg update of difference feature
+  avgDiffNormMagn = (float)(avgDiffNormMagn / (inst->featureData[5] + (float)0.0001));
+  inst->featureData[4] += SPECT_DIFF_TAVG * (avgDiffNormMagn - inst->featureData[4]);
+}
+
+// Compute speech/noise probability
+// speech/noise probability is returned in: probSpeechFinal
+//magn is the input magnitude spectrum
+//noise is the noise spectrum
+//snrLocPrior is the prior snr for each freq.
+//snr loc_post is the post snr for each freq.
// Compute speech/noise probability.
// The per-bin speech probability is returned in |probSpeechFinal|.
// |snrLocPrior| is the prior SNR for each frequency bin and |snrLocPost| the
// posterior SNR; both have length inst->magnLen. Combines a smoothed
// likelihood-ratio (LR) factor with the feature-based prior model in
// inst->priorModelPars.
void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snrLocPrior,
                              float* snrLocPost) {
  int i, sgnMap;
  float invLrt, gainPrior, indPrior;
  float logLrtTimeAvgKsum, besselTmp;
  float indicator0, indicator1, indicator2;
  float tmpFloat1, tmpFloat2;
  float weightIndPrior0, weightIndPrior1, weightIndPrior2;
  float threshPrior0, threshPrior1, threshPrior2;
  float widthPrior, widthPrior0, widthPrior1, widthPrior2;

  // Widths of the sigmoid (tanh) maps for the three indicator functions.
  widthPrior0 = WIDTH_PR_MAP;
  widthPrior1 = (float)2.0 * WIDTH_PR_MAP; //width for pause region:
  // lower range, so increase width in tanh map
  widthPrior2 = (float)2.0 * WIDTH_PR_MAP; //for spectral-difference measure

  //threshold parameters for features
  threshPrior0 = inst->priorModelPars[0];
  threshPrior1 = inst->priorModelPars[1];
  threshPrior2 = inst->priorModelPars[3];

  //sign for flatness feature
  sgnMap = (int)(inst->priorModelPars[2]);

  //weight parameters for features
  weightIndPrior0 = inst->priorModelPars[4];
  weightIndPrior1 = inst->priorModelPars[5];
  weightIndPrior2 = inst->priorModelPars[6];

  // compute feature based on average LR factor
  // this is the average over all frequencies of the smooth log lrt
  logLrtTimeAvgKsum = 0.0;
  for (i = 0; i < inst->magnLen; i++) {
    tmpFloat1 = (float)1.0 + (float)2.0 * snrLocPrior[i];
    tmpFloat2 = (float)2.0 * snrLocPrior[i] / (tmpFloat1 + (float)0.0001);
    besselTmp = (snrLocPost[i] + (float)1.0) * tmpFloat2;
    // exponentially smoothed per-bin log likelihood ratio
    inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - (float)log(tmpFloat1)
                                          - inst->logLrtTimeAvg[i]);
    logLrtTimeAvgKsum += inst->logLrtTimeAvg[i];
  }
  logLrtTimeAvgKsum = (float)logLrtTimeAvgKsum / (inst->magnLen);
  inst->featureData[3] = logLrtTimeAvgKsum;
  // done with computation of LR factor

  //
  //compute the indicator functions
  //

  // average lrt feature
  widthPrior = widthPrior0;
  //use larger width in tanh map for pause regions
  if (logLrtTimeAvgKsum < threshPrior0) {
    widthPrior = widthPrior1;
  }
  // compute indicator function: sigmoid map
  indicator0 = (float)0.5 * ((float)tanh(widthPrior *
      (logLrtTimeAvgKsum - threshPrior0)) + (float)1.0);

  //spectral flatness feature
  tmpFloat1 = inst->featureData[0];
  widthPrior = widthPrior0;
  //use larger width in tanh map for pause regions
  if (sgnMap == 1 && (tmpFloat1 > threshPrior1)) {
    widthPrior = widthPrior1;
  }
  if (sgnMap == -1 && (tmpFloat1 < threshPrior1)) {
    widthPrior = widthPrior1;
  }
  // compute indicator function: sigmoid map (sign flips the direction)
  indicator1 = (float)0.5 * ((float)tanh((float)sgnMap *
      widthPrior * (threshPrior1 - tmpFloat1)) + (float)1.0);

  //for template spectrum-difference
  tmpFloat1 = inst->featureData[4];
  widthPrior = widthPrior0;
  //use larger width in tanh map for pause regions
  if (tmpFloat1 < threshPrior2) {
    widthPrior = widthPrior2;
  }
  // compute indicator function: sigmoid map
  indicator2 = (float)0.5 * ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2))
                             + (float)1.0);

  //combine the indicator function with the feature weights
  indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2
             * indicator2;
  // done with computing indicator function

  //compute the prior probability (exponentially smoothed)
  inst->priorSpeechProb += PRIOR_UPDATE * (indPrior - inst->priorSpeechProb);
  // make sure probabilities are within range: keep floor to 0.01
  if (inst->priorSpeechProb > 1.0) {
    inst->priorSpeechProb = (float)1.0;
  }
  if (inst->priorSpeechProb < 0.01) {
    inst->priorSpeechProb = (float)0.01;
  }

  //final speech probability: combine prior model with LR factor:
  gainPrior = ((float)1.0 - inst->priorSpeechProb) / (inst->priorSpeechProb + (float)0.0001);
  for (i = 0; i < inst->magnLen; i++) {
    invLrt = (float)exp(-inst->logLrtTimeAvg[i]);
    invLrt = (float)gainPrior * invLrt;
    probSpeechFinal[i] = (float)1.0 / ((float)1.0 + invLrt);
  }
}
+
+int WebRtcNs_ProcessCore(NSinst_t* inst,
+                         short* speechFrame,
+                         short* speechFrameHB,
+                         short* outFrame,
+                         short* outFrameHB) {
+  // main routine for noise reduction
+  //
+  // Processes one 10 ms input frame: buffers it, and once a full analysis
+  // block is available performs windowing + FFT, noise spectrum estimation,
+  // Wiener-type gain filtering, and overlap-add synthesis back to the time
+  // domain. For 32 kHz input (flagHB set) a single time-domain gain derived
+  // from the low band is applied to the high band.
+  // Returns 0 on success, -1 if the instance is uninitialized or the HB
+  // input pointer is missing for 32 kHz operation.
+
+  int     flagHB = 0;
+  int     i;
+  const int kStartBand = 5; // Skip first frequency bins during estimation.
+  int     updateParsFlag;
+
+  float   energy1, energy2, gain, factor, factor1, factor2;
+  float   signalEnergy, sumMagn;
+  float   snrPrior, currentEstimateStsa;
+  float   tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
+  float   gammaNoiseTmp, gammaNoiseOld;
+  float   noiseUpdateTmp, fTmp, dTmp;
+  float   fin[BLOCKL_MAX], fout[BLOCKL_MAX];
+  float   winData[ANAL_BLOCKL_MAX];
+  float   magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
+  float   theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
+  float   snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
+  // NOTE(review): probSpeechFinal is written only on the full-processing
+  // path (and only under PROCESS_FLOW_2), but the HB section near the end
+  // reads it unconditionally when flagHB == 1 — confirm it cannot be
+  // consumed uninitialized in other build configurations.
+  float   probSpeechFinal[HALF_ANAL_BLOCKL], previousEstimateStsa[HALF_ANAL_BLOCKL];
+  float   real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
+  // Variables during startup
+  float   sum_log_i = 0.0;
+  float   sum_log_i_square = 0.0;
+  float   sum_log_magn = 0.0;
+  float   sum_log_i_log_magn = 0.0;
+  float   parametric_noise = 0.0;
+  float   parametric_exp = 0.0;
+  float   parametric_num = 0.0;
+
+  // SWB variables
+  int     deltaBweHB = 1;
+  int     deltaGainHB = 1;
+  float   decayBweHB = 1.0;
+  float   gainMapParHB = 1.0;
+  float   gainTimeDomainHB = 1.0;
+  float   avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;
+
+  // Check that initiation has been done
+  if (inst->initFlag != 1) {
+    return (-1);
+  }
+  // Check for valid pointers based on sampling rate
+  if (inst->fs == 32000) {
+    if (speechFrameHB == NULL) {
+      return -1;
+    }
+    flagHB = 1;
+    // range for averaging low band quantities for H band gain
+    deltaBweHB = (int)inst->magnLen / 4;
+    deltaGainHB = deltaBweHB;
+  }
+  //
+  updateParsFlag = inst->modelUpdatePars[0];
+  //
+
+  //for LB do all processing
+  // convert to float
+  for (i = 0; i < inst->blockLen10ms; i++) {
+    fin[i] = (float)speechFrame[i];
+  }
+  // update analysis buffer for L band
+  memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
+         sizeof(float) * (inst->anaLen - inst->blockLen10ms));
+  memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, fin,
+         sizeof(float) * inst->blockLen10ms);
+
+  if (flagHB == 1) {
+    // convert to float
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      fin[i] = (float)speechFrameHB[i];
+    }
+    // update analysis buffer for H band
+    memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
+           sizeof(float) * (inst->anaLen - inst->blockLen10ms));
+    memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, fin,
+           sizeof(float) * inst->blockLen10ms);
+  }
+
+  // check if processing needed
+  if (inst->outLen == 0) {
+    // windowing
+    energy1 = 0.0;
+    for (i = 0; i < inst->anaLen; i++) {
+      winData[i] = inst->window[i] * inst->dataBuf[i];
+      energy1 += winData[i] * winData[i];
+    }
+    if (energy1 == 0.0) {
+      // synthesize the special case of zero input
+      // we want to avoid updating statistics in this case:
+      // Updating feature statistics when we have zeros only will cause thresholds to
+      // move towards zero signal situations. This in turn has the effect that once the
+      // signal is "turned on" (non-zero values) everything will be treated as speech
+      // and there is no noise suppression effect. Depending on the duration of the
+      // inactive signal it takes a considerable amount of time for the system to learn
+      // what is noise and what is speech.
+
+      // read out fully processed segment
+      for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
+        fout[i - inst->windShift] = inst->syntBuf[i];
+      }
+      // update synthesis buffer
+      memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
+             sizeof(float) * (inst->anaLen - inst->blockLen));
+      memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
+             sizeof(float) * inst->blockLen);
+
+      // out buffer
+      inst->outLen = inst->blockLen - inst->blockLen10ms;
+      if (inst->blockLen > inst->blockLen10ms) {
+        for (i = 0; i < inst->outLen; i++) {
+          inst->outBuf[i] = fout[i + inst->blockLen10ms];
+        }
+      }
+      // convert to short, saturating to 16-bit range
+      for (i = 0; i < inst->blockLen10ms; i++) {
+        dTmp = fout[i];
+        if (dTmp < WEBRTC_SPL_WORD16_MIN) {
+          dTmp = WEBRTC_SPL_WORD16_MIN;
+        } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
+          dTmp = WEBRTC_SPL_WORD16_MAX;
+        }
+        outFrame[i] = (short)dTmp;
+      }
+
+      // for time-domain gain of HB: pass the HB input through unmodified
+      if (flagHB == 1) {
+        for (i = 0; i < inst->blockLen10ms; i++) {
+          dTmp = inst->dataBufHB[i];
+          if (dTmp < WEBRTC_SPL_WORD16_MIN) {
+            dTmp = WEBRTC_SPL_WORD16_MIN;
+          } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
+            dTmp = WEBRTC_SPL_WORD16_MAX;
+          }
+          outFrameHB[i] = (short)dTmp;
+        }
+      } // end of H band gain computation
+      //
+      return 0;
+    }
+
+    //
+    inst->blockInd++; // Update the block index only when we process a block.
+    // FFT
+    WebRtc_rdft(inst->anaLen, 1, winData, inst->ip, inst->wfft);
+
+    // Unpack the real FFT output: DC and Nyquist bins are purely real
+    imag[0] = 0;
+    real[0] = winData[0];
+    magn[0] = (float)(fabs(real[0]) + 1.0f);
+    imag[inst->magnLen - 1] = 0;
+    real[inst->magnLen - 1] = winData[1];
+    magn[inst->magnLen - 1] = (float)(fabs(real[inst->magnLen - 1]) + 1.0f);
+    signalEnergy = (float)(real[0] * real[0]) + 
+                   (float)(real[inst->magnLen - 1] * real[inst->magnLen - 1]);
+    sumMagn = magn[0] + magn[inst->magnLen - 1];
+    if (inst->blockInd < END_STARTUP_SHORT) {
+      // accumulate statistics for the startup parametric noise fit
+      inst->initMagnEst[0] += magn[0];
+      inst->initMagnEst[inst->magnLen - 1] += magn[inst->magnLen - 1];
+      tmpFloat2 = log((float)(inst->magnLen - 1));
+      sum_log_i = tmpFloat2;
+      sum_log_i_square = tmpFloat2 * tmpFloat2;
+      tmpFloat1 = log(magn[inst->magnLen - 1]);
+      sum_log_magn = tmpFloat1;
+      sum_log_i_log_magn = tmpFloat2 * tmpFloat1;
+    }
+    for (i = 1; i < inst->magnLen - 1; i++) {
+      real[i] = winData[2 * i];
+      imag[i] = winData[2 * i + 1];
+      // magnitude spectrum
+      fTmp = real[i] * real[i];
+      fTmp += imag[i] * imag[i];
+      signalEnergy += fTmp;
+      magn[i] = ((float)sqrt(fTmp)) + 1.0f;
+      sumMagn += magn[i];
+      if (inst->blockInd < END_STARTUP_SHORT) {
+        inst->initMagnEst[i] += magn[i];
+        if (i >= kStartBand) {
+          tmpFloat2 = log((float)i);
+          sum_log_i += tmpFloat2;
+          sum_log_i_square += tmpFloat2 * tmpFloat2;
+          tmpFloat1 = log(magn[i]);
+          sum_log_magn += tmpFloat1;
+          sum_log_i_log_magn += tmpFloat2 * tmpFloat1;
+        }
+      }
+    }
+    signalEnergy = signalEnergy / ((float)inst->magnLen);
+    inst->signalEnergy = signalEnergy;
+    inst->sumMagn = sumMagn;
+
+    //compute spectral flatness on input spectrum
+    WebRtcNs_ComputeSpectralFlatness(inst, magn);
+    // quantile noise estimate
+    WebRtcNs_NoiseEstimation(inst, magn, noise);
+    //compute simplified noise model during startup
+    if (inst->blockInd < END_STARTUP_SHORT) {
+      // Estimate White noise
+      inst->whiteNoiseLevel += sumMagn / ((float)inst->magnLen) * inst->overdrive;
+      // Estimate Pink noise parameters
+      // (least-squares fit of log-magnitude versus log-frequency)
+      tmpFloat1 = sum_log_i_square * ((float)(inst->magnLen - kStartBand));
+      tmpFloat1 -= (sum_log_i * sum_log_i);
+      tmpFloat2 = (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
+      tmpFloat3 = tmpFloat2 / tmpFloat1;
+      // Constrain the estimated spectrum to be positive
+      if (tmpFloat3 < 0.0f) {
+        tmpFloat3 = 0.0f;
+      }
+      inst->pinkNoiseNumerator += tmpFloat3;
+      tmpFloat2 = (sum_log_i * sum_log_magn);
+      tmpFloat2 -= ((float)(inst->magnLen - kStartBand)) * sum_log_i_log_magn;
+      tmpFloat3 = tmpFloat2 / tmpFloat1;
+      // Constrain the pink noise power to be in the interval [0, 1];
+      if (tmpFloat3 < 0.0f) {
+        tmpFloat3 = 0.0f;
+      }
+      if (tmpFloat3 > 1.0f) {
+        tmpFloat3 = 1.0f;
+      }
+      inst->pinkNoiseExp += tmpFloat3;
+
+      // Calculate frequency independent parts of parametric noise estimate.
+      if (inst->pinkNoiseExp == 0.0f) {
+        // Use white noise estimate
+        parametric_noise = inst->whiteNoiseLevel;
+      } else {
+        // Use pink noise estimate
+        parametric_num = exp(inst->pinkNoiseNumerator / (float)(inst->blockInd + 1));
+        parametric_num *= (float)(inst->blockInd + 1);
+        parametric_exp = inst->pinkNoiseExp / (float)(inst->blockInd + 1);
+        parametric_noise = parametric_num / pow((float)kStartBand, parametric_exp);
+      }
+      for (i = 0; i < inst->magnLen; i++) {
+        // Estimate the background noise using the white and pink noise parameters
+        if ((inst->pinkNoiseExp > 0.0f) && (i >= kStartBand)) {
+          // Use pink noise estimate
+          parametric_noise = parametric_num / pow((float)i, parametric_exp);
+        }
+        theFilterTmp[i] = (inst->initMagnEst[i] - inst->overdrive * parametric_noise);
+        theFilterTmp[i] /= (inst->initMagnEst[i] + (float)0.0001);
+        // Weight quantile noise with modeled noise
+        noise[i] *= (inst->blockInd);
+        tmpFloat2 = parametric_noise * (END_STARTUP_SHORT - inst->blockInd);
+        noise[i] += (tmpFloat2 / (float)(inst->blockInd + 1));
+        noise[i] /= END_STARTUP_SHORT;
+      }
+    }
+    //compute average signal during END_STARTUP_LONG time:
+    // used to normalize spectral difference measure
+    if (inst->blockInd < END_STARTUP_LONG) {
+      inst->featureData[5] *= inst->blockInd;
+      inst->featureData[5] += signalEnergy;
+      inst->featureData[5] /= (inst->blockInd + 1);
+    }
+
+#ifdef PROCESS_FLOW_0
+    if (inst->blockInd > END_STARTUP_LONG) {
+      //option: average the quantile noise: for check with AEC2
+      for (i = 0; i < inst->magnLen; i++) {
+        noise[i] = (float)0.6 * inst->noisePrev[i] + (float)0.4 * noise[i];
+      }
+      for (i = 0; i < inst->magnLen; i++) {
+        // Wiener with over-subtraction:
+        theFilter[i] = (magn[i] - inst->overdrive * noise[i]) / (magn[i] + (float)0.0001);
+      }
+    }
+#else
+    //start processing at frames == converged+1
+    //
+    // STEP 1: compute  prior and post snr based on quantile noise est
+    //
+
+    // compute DD estimate of prior SNR: needed for new method
+    for (i = 0; i < inst->magnLen; i++) {
+      // post snr
+      snrLocPost[i] = (float)0.0;
+      if (magn[i] > noise[i]) {
+        snrLocPost[i] = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
+      }
+      // previous post snr
+      // previous estimate: based on previous frame with gain filter
+      previousEstimateStsa[i] = inst->magnPrev[i] / (inst->noisePrev[i] + (float)0.0001)
+                                * (inst->smooth[i]);
+      // DD estimate is sum of two terms: current estimate and previous estimate
+      // directed decision update of snrPrior
+      snrLocPrior[i] = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
+                       * snrLocPost[i];
+      // post and prior snr needed for step 2
+    } // end of loop over freqs
+#ifdef PROCESS_FLOW_1
+    for (i = 0; i < inst->magnLen; i++) {
+      // gain filter
+      tmpFloat1 = inst->overdrive + snrLocPrior[i];
+      tmpFloat2 = (float)snrLocPrior[i] / tmpFloat1;
+      theFilter[i] = (float)tmpFloat2;
+    } // end of loop over freqs
+#endif
+    // done with step 1: dd computation of prior and post snr
+
+    //
+    //STEP 2: compute speech/noise likelihood
+    //
+#ifdef PROCESS_FLOW_2
+    // compute difference of input spectrum with learned/estimated noise spectrum
+    WebRtcNs_ComputeSpectralDifference(inst, magn);
+    // compute histograms for parameter decisions (thresholds and weights for features)
+    // parameters are extracted once every window time (=inst->modelUpdatePars[1])
+    if (updateParsFlag >= 1) {
+      // counter update
+      inst->modelUpdatePars[3]--;
+      // update histogram
+      if (inst->modelUpdatePars[3] > 0) {
+        WebRtcNs_FeatureParameterExtraction(inst, 0);
+      }
+      // compute model parameters
+      if (inst->modelUpdatePars[3] == 0) {
+        WebRtcNs_FeatureParameterExtraction(inst, 1);
+        inst->modelUpdatePars[3] = inst->modelUpdatePars[1];
+        // if wish to update only once, set flag to zero
+        if (updateParsFlag == 1) {
+          inst->modelUpdatePars[0] = 0;
+        } else {
+          // update every window:
+          // get normalization for spectral difference for next window estimate
+          inst->featureData[6] = inst->featureData[6]
+                                 / ((float)inst->modelUpdatePars[1]);
+          inst->featureData[5] = (float)0.5 * (inst->featureData[6]
+                                               + inst->featureData[5]);
+          inst->featureData[6] = (float)0.0;
+        }
+      }
+    }
+    // compute speech/noise probability
+    WebRtcNs_SpeechNoiseProb(inst, probSpeechFinal, snrLocPrior, snrLocPost);
+    // time-avg parameter for noise update
+    gammaNoiseTmp = NOISE_UPDATE;
+    for (i = 0; i < inst->magnLen; i++) {
+      probSpeech = probSpeechFinal[i];
+      probNonSpeech = (float)1.0 - probSpeech;
+      // temporary noise update:
+      // use it for speech frames if update value is less than previous
+      noiseUpdateTmp = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
+                       * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
+      //
+      // time-constant based on speech/noise state
+      gammaNoiseOld = gammaNoiseTmp;
+      gammaNoiseTmp = NOISE_UPDATE;
+      // increase gamma (i.e., less noise update) for frame likely to be speech
+      if (probSpeech > PROB_RANGE) {
+        gammaNoiseTmp = SPEECH_UPDATE;
+      }
+      // conservative noise update
+      if (probSpeech < PROB_RANGE) {
+        inst->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - inst->magnAvgPause[i]);
+      }
+      // noise update
+      if (gammaNoiseTmp == gammaNoiseOld) {
+        noise[i] = noiseUpdateTmp;
+      } else {
+        noise[i] = gammaNoiseTmp * inst->noisePrev[i] + ((float)1.0 - gammaNoiseTmp)
+                   * (probNonSpeech * magn[i] + probSpeech * inst->noisePrev[i]);
+        // allow for noise update downwards:
+        //  if noise update decreases the noise, it is safe, so allow it to happen
+        if (noiseUpdateTmp < noise[i]) {
+          noise[i] = noiseUpdateTmp;
+        }
+      }
+    } // end of freq loop
+    // done with step 2: noise update
+
+    //
+    // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
+    //
+    for (i = 0; i < inst->magnLen; i++) {
+      // post and prior snr
+      currentEstimateStsa = (float)0.0;
+      if (magn[i] > noise[i]) {
+        currentEstimateStsa = magn[i] / (noise[i] + (float)0.0001) - (float)1.0;
+      }
+      // DD estimate is sum of two terms: current estimate and previous estimate
+      // directed decision update of snrPrior
+      snrPrior = DD_PR_SNR * previousEstimateStsa[i] + ((float)1.0 - DD_PR_SNR)
+                 * currentEstimateStsa;
+      // gain filter
+      tmpFloat1 = inst->overdrive + snrPrior;
+      tmpFloat2 = (float)snrPrior / tmpFloat1;
+      theFilter[i] = (float)tmpFloat2;
+    } // end of loop over freqs
+    // done with step3
+#endif
+#endif
+
+    for (i = 0; i < inst->magnLen; i++) {
+      // flooring bottom
+      if (theFilter[i] < inst->denoiseBound) {
+        theFilter[i] = inst->denoiseBound;
+      }
+      // flooring top
+      if (theFilter[i] > (float)1.0) {
+        theFilter[i] = 1.0;
+      }
+      if (inst->blockInd < END_STARTUP_SHORT) {
+        // flooring bottom
+        if (theFilterTmp[i] < inst->denoiseBound) {
+          theFilterTmp[i] = inst->denoiseBound;
+        }
+        // flooring top
+        if (theFilterTmp[i] > (float)1.0) {
+          theFilterTmp[i] = 1.0;
+        }
+        // Weight the two suppression filters
+        theFilter[i] *= (inst->blockInd);
+        theFilterTmp[i] *= (END_STARTUP_SHORT - inst->blockInd);
+        theFilter[i] += theFilterTmp[i];
+        theFilter[i] /= (END_STARTUP_SHORT);
+      }
+      // smoothing
+#ifdef PROCESS_FLOW_0
+      inst->smooth[i] *= SMOOTH; // value set to 0.7 in define.h file
+      inst->smooth[i] += ((float)1.0 - SMOOTH) * theFilter[i];
+#else
+      inst->smooth[i] = theFilter[i];
+#endif
+      real[i] *= inst->smooth[i];
+      imag[i] *= inst->smooth[i];
+    }
+    // keep track of noise and magn spectrum for next frame
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->noisePrev[i] = noise[i];
+      inst->magnPrev[i] = magn[i];
+    }
+    // back to time domain
+    // repack spectrum into the in-place rdft layout (DC and Nyquist in [0],[1])
+    winData[0] = real[0];
+    winData[1] = real[inst->magnLen - 1];
+    for (i = 1; i < inst->magnLen - 1; i++) {
+      winData[2 * i] = real[i];
+      winData[2 * i + 1] = imag[i];
+    }
+    WebRtc_rdft(inst->anaLen, -1, winData, inst->ip, inst->wfft);
+
+    for (i = 0; i < inst->anaLen; i++) {
+      real[i] = 2.0f * winData[i] / inst->anaLen; // fft scaling
+    }
+
+    //scale factor: only do it after END_STARTUP_LONG time
+    factor = (float)1.0;
+    if (inst->gainmap == 1 && inst->blockInd > END_STARTUP_LONG) {
+      factor1 = (float)1.0;
+      factor2 = (float)1.0;
+
+      energy2 = 0.0;
+      for (i = 0; i < inst->anaLen; i++) {
+        energy2 += (float)real[i] * (float)real[i];
+      }
+      gain = (float)sqrt(energy2 / (energy1 + (float)1.0));
+
+#ifdef PROCESS_FLOW_2
+      // scaling for new version
+      if (gain > B_LIM) {
+        factor1 = (float)1.0 + (float)1.3 * (gain - B_LIM);
+        if (gain * factor1 > (float)1.0) {
+          factor1 = (float)1.0 / gain;
+        }
+      }
+      if (gain < B_LIM) {
+        //don't reduce scale too much for pause regions:
+        // attenuation here should be controlled by flooring
+        if (gain <= inst->denoiseBound) {
+          gain = inst->denoiseBound;
+        }
+        factor2 = (float)1.0 - (float)0.3 * (B_LIM - gain);
+      }
+      //combine both scales with speech/noise prob:
+      // note prior (priorSpeechProb) is not frequency dependent
+      factor = inst->priorSpeechProb * factor1 + ((float)1.0 - inst->priorSpeechProb)
+               * factor2;
+#else
+      if (gain > B_LIM) {
+        factor = (float)1.0 + (float)1.3 * (gain - B_LIM);
+      } else {
+        factor = (float)1.0 + (float)2.0 * (gain - B_LIM);
+      }
+      if (gain * factor > (float)1.0) {
+        factor = (float)1.0 / gain;
+      }
+#endif
+    } // out of inst->gainmap==1
+
+    // synthesis (overlap-add into the synthesis buffer)
+    for (i = 0; i < inst->anaLen; i++) {
+      inst->syntBuf[i] += factor * inst->window[i] * (float)real[i];
+    }
+    // read out fully processed segment
+    for (i = inst->windShift; i < inst->blockLen + inst->windShift; i++) {
+      fout[i - inst->windShift] = inst->syntBuf[i];
+    }
+    // update synthesis buffer
+    memcpy(inst->syntBuf, inst->syntBuf + inst->blockLen,
+           sizeof(float) * (inst->anaLen - inst->blockLen));
+    memset(inst->syntBuf + inst->anaLen - inst->blockLen, 0,
+           sizeof(float) * inst->blockLen);
+
+    // out buffer
+    inst->outLen = inst->blockLen - inst->blockLen10ms;
+    if (inst->blockLen > inst->blockLen10ms) {
+      for (i = 0; i < inst->outLen; i++) {
+        inst->outBuf[i] = fout[i + inst->blockLen10ms];
+      }
+    }
+  } // end of if out.len==0
+  else {
+    // drain previously processed samples from the out buffer
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      fout[i] = inst->outBuf[i];
+    }
+    memcpy(inst->outBuf, inst->outBuf + inst->blockLen10ms,
+           sizeof(float) * (inst->outLen - inst->blockLen10ms));
+    memset(inst->outBuf + inst->outLen - inst->blockLen10ms, 0,
+           sizeof(float) * inst->blockLen10ms);
+    inst->outLen -= inst->blockLen10ms;
+  }
+
+  // convert to short, saturating to 16-bit range
+  for (i = 0; i < inst->blockLen10ms; i++) {
+    dTmp = fout[i];
+    if (dTmp < WEBRTC_SPL_WORD16_MIN) {
+      dTmp = WEBRTC_SPL_WORD16_MIN;
+    } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
+      dTmp = WEBRTC_SPL_WORD16_MAX;
+    }
+    outFrame[i] = (short)dTmp;
+  }
+
+  // for time-domain gain of HB
+  if (flagHB == 1) {
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->speechProbHB[i] = probSpeechFinal[i];
+    }
+    if (inst->blockInd > END_STARTUP_LONG) {
+      // average speech prob from low band
+      // avg over second half (i.e., 4->8kHz) of freq. spectrum
+      avgProbSpeechHB = 0.0;
+      for (i = inst->magnLen - deltaBweHB - 1; i < inst->magnLen - 1; i++) {
+        avgProbSpeechHB += inst->speechProbHB[i];
+      }
+      avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB);
+      // average filter gain from low band
+      // average over second half (i.e., 4->8kHz) of freq. spectrum
+      avgFilterGainHB = 0.0;
+      for (i = inst->magnLen - deltaGainHB - 1; i < inst->magnLen - 1; i++) {
+        avgFilterGainHB += inst->smooth[i];
+      }
+      avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB));
+      avgProbSpeechHBTmp = (float)2.0 * avgProbSpeechHB - (float)1.0;
+      // gain based on speech prob:
+      gainModHB = (float)0.5 * ((float)1.0 + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
+      //combine gain with low band gain
+      gainTimeDomainHB = (float)0.5 * gainModHB + (float)0.5 * avgFilterGainHB;
+      if (avgProbSpeechHB >= (float)0.5) {
+        gainTimeDomainHB = (float)0.25 * gainModHB + (float)0.75 * avgFilterGainHB;
+      }
+      gainTimeDomainHB = gainTimeDomainHB * decayBweHB;
+    } // end of converged
+    //make sure gain is within flooring range
+    // flooring bottom
+    if (gainTimeDomainHB < inst->denoiseBound) {
+      gainTimeDomainHB = inst->denoiseBound;
+    }
+    // flooring top
+    if (gainTimeDomainHB > (float)1.0) {
+      gainTimeDomainHB = 1.0;
+    }
+    //apply gain
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      dTmp = gainTimeDomainHB * inst->dataBufHB[i];
+      if (dTmp < WEBRTC_SPL_WORD16_MIN) {
+        dTmp = WEBRTC_SPL_WORD16_MIN;
+      } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
+        dTmp = WEBRTC_SPL_WORD16_MAX;
+      }
+      outFrameHB[i] = (short)dTmp;
+    }
+  } // end of H band gain computation
+  //
+
+  return 0;
+}
diff --git a/src/modules/audio_processing/ns/ns_core.h b/src/modules/audio_processing/ns/ns_core.h
new file mode 100644
index 0000000..2f4c34f
--- /dev/null
+++ b/src/modules/audio_processing/ns/ns_core.h
@@ -0,0 +1,179 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
+
+#include "defines.h"
+
+// Tuning constants for the histogram-based feature-parameter extraction
+// (thresholds and weights of the prior speech/noise model).
+typedef struct NSParaExtract_t_ {
+
+  //bin size of histogram
+  float binSizeLrt;
+  float binSizeSpecFlat;
+  float binSizeSpecDiff;
+  //range of histogram over which lrt threshold is computed
+  float rangeAvgHistLrt;
+  //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
+  //thresholds for prior model
+  float factor1ModelPars; //for lrt and spectral difference
+  float factor2ModelPars; //for spectral_flatness: used when noise is flatter than speech
+  //peak limit for spectral flatness (varies between 0 and 1)
+  float thresPosSpecFlat;
+  //limit on spacing of two highest peaks in histogram: spacing determined by bin size
+  float limitPeakSpacingSpecFlat;
+  float limitPeakSpacingSpecDiff;
+  //limit on relevance of second peak:
+  float limitPeakWeightsSpecFlat;
+  float limitPeakWeightsSpecDiff;
+  //limit on fluctuation of lrt feature
+  float thresFluctLrt;
+  //limit on the max and min values for the feature thresholds
+  float maxLrt;
+  float minLrt;
+  float maxSpecFlat;
+  float minSpecFlat;
+  float maxSpecDiff;
+  float minSpecDiff;
+  //criteria of weight of histogram peak  to accept/reject feature
+  int thresWeightSpecFlat;
+  int thresWeightSpecDiff;
+
+} NSParaExtract_t;
+
+// State of one floating-point noise suppression instance.
+typedef struct NSinst_t_ {
+
+  WebRtc_UWord32  fs;           //sampling frequency (Hz)
+  int             blockLen;     //processing block length (samples)
+  int             blockLen10ms; //samples per 10 ms frame
+  int             windShift;    //read-out offset into the synthesis buffer
+  int             outLen;       //pending samples left in outBuf
+  int             anaLen;       //analysis (FFT) length
+  int             magnLen;      //number of magnitude bins (anaLen / 2 + 1)
+  int             aggrMode;     //suppression aggressiveness mode
+  const float*    window;       //analysis/synthesis window
+  float           dataBuf[ANAL_BLOCKL_MAX];   //input analysis buffer (L band)
+  float           syntBuf[ANAL_BLOCKL_MAX];   //overlap-add synthesis buffer
+  float           outBuf[3 * BLOCKL_MAX];     //processed samples awaiting output
+
+  int             initFlag;     //set to 1 once WebRtcNs_InitCore has run
+  // parameters for quantile noise estimation
+  float           density[SIMULT* HALF_ANAL_BLOCKL];
+  float           lquantile[SIMULT* HALF_ANAL_BLOCKL];
+  float           quantile[HALF_ANAL_BLOCKL];
+  int             counter[SIMULT];
+  int             updates;
+  // parameters for Wiener filter
+  float           smooth[HALF_ANAL_BLOCKL];   //smoothed suppression gain per bin
+  float           overdrive;                  //over-subtraction factor
+  float           denoiseBound;               //lower bound (floor) on gain
+  int             gainmap;                    //enable time-domain scale factor
+  // fft work arrays.
+  int             ip[IP_LENGTH];
+  float           wfft[W_LENGTH];
+
+  // parameters for new method: some not needed, will reduce/cleanup later
+  WebRtc_Word32   blockInd;                           //frame index counter
+  int             modelUpdatePars[4];                 //parameters for updating or estimating
+  // thresholds/weights for prior model
+  float           priorModelPars[7];                  //parameters for prior model
+  float           noisePrev[HALF_ANAL_BLOCKL];        //noise spectrum from previous frame
+  float           magnPrev[HALF_ANAL_BLOCKL];         //magnitude spectrum of previous frame
+  float           logLrtTimeAvg[HALF_ANAL_BLOCKL];    //log lrt factor with time-smoothing
+  float           priorSpeechProb;                    //prior speech/noise probability
+  float           featureData[7];                     //data for features
+  float           magnAvgPause[HALF_ANAL_BLOCKL];     //conservative noise spectrum estimate
+  float           signalEnergy;                       //energy of magn
+  float           sumMagn;                            //sum of magn
+  float           whiteNoiseLevel;                    //initial noise estimate
+  float           initMagnEst[HALF_ANAL_BLOCKL];      //initial magnitude spectrum estimate
+  float           pinkNoiseNumerator;                 //pink noise parameter: numerator
+  float           pinkNoiseExp;                       //pink noise parameter: power of freq
+  NSParaExtract_t featureExtractionParams;            //parameters for feature extraction
+  //histograms for parameter estimation
+  int             histLrt[HIST_PAR_EST];
+  int             histSpecFlat[HIST_PAR_EST];
+  int             histSpecDiff[HIST_PAR_EST];
+  //quantities for high band estimate
+  float           speechProbHB[HALF_ANAL_BLOCKL];     //final speech/noise prob: prior + LRT
+  float           dataBufHB[ANAL_BLOCKL_MAX];         //buffering data for HB
+
+} NSinst_t;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcNs_InitCore(...)
+ *
+ * This function initializes a noise suppression instance
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - fs            : Sampling frequency
+ *
+ * Output:
+ *      - inst          : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_InitCore(NSinst_t* inst, WebRtc_UWord32 fs);
+
+/****************************************************************************
+ * WebRtcNs_set_policy_core(...)
+ *
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - inst          : Noise suppression instance
+ *      - mode          : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
+ *
+ * Output:
+ *      - inst          : Instance with updated aggressiveness policy
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
+
+/****************************************************************************
+ * WebRtcNs_ProcessCore
+ *
+ * Do noise suppression.
+ *
+ * Input:
+ *      - inst          : Initialized noise suppression instance
+ *      - inFrameLow    : Input speech frame for lower band
+ *      - inFrameHigh   : Input speech frame for higher band
+ *
+ * Output:
+ *      - inst          : Updated instance
+ *      - outFrameLow   : Output speech frame for lower band
+ *      - outFrameHigh  : Output speech frame for higher band
+ *
+ * Return value         :  0 - OK
+ *                        -1 - Error
+ */
+
+
+int WebRtcNs_ProcessCore(NSinst_t* inst,
+                         short* inFrameLow,
+                         short* inFrameHigh,
+                         short* outFrameLow,
+                         short* outFrameHigh);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
diff --git a/src/modules/audio_processing/ns/nsx_core.c b/src/modules/audio_processing/ns/nsx_core.c
new file mode 100644
index 0000000..51bde0c
--- /dev/null
+++ b/src/modules/audio_processing/ns/nsx_core.c
@@ -0,0 +1,2444 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "noise_suppression_x.h"
+
+#include <assert.h>
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu_features_wrapper.h"
+#include "nsx_core.h"
+
+// Skip first frequency bins during estimation. (0 <= value < 64)
+static const int kStartBand = 5;
+
+// Constants to compensate for shifting signal log(2^shifts).
+const WebRtc_Word16 WebRtcNsx_kLogTable[9] = {
+  0, 177, 355, 532, 710, 887, 1065, 1242, 1420
+};
+
+const WebRtc_Word16 WebRtcNsx_kCounterDiv[201] = {
+  32767, 16384, 10923, 8192, 6554, 5461, 4681,
+  4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560,
+  1489, 1425, 1365, 1311, 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910,
+  886, 862, 840, 819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618,
+  607, 596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475, 468,
+  462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390, 386, 381, 377,
+  372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331, 328, 324, 321, 318, 315,
+  312, 309, 306, 303, 301, 298, 295, 293, 290, 287, 285, 282, 280, 278, 275, 273, 271,
+  269, 266, 264, 262, 260, 258, 256, 254, 252, 250, 248, 246, 245, 243, 241, 239, 237,
+  236, 234, 232, 231, 229, 228, 226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211,
+  210, 209, 207, 206, 205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191,
+  189, 188, 187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173,
+  172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163
+};
+
+const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256] = {
+  0,   1,   3,   4,   6,   7,   9,  10,  11,  13,  14,  16,  17,  18,  20,  21,
+  22,  24,  25,  26,  28,  29,  30,  32,  33,  34,  36,  37,  38,  40,  41,  42,
+  44,  45,  46,  47,  49,  50,  51,  52,  54,  55,  56,  57,  59,  60,  61,  62,
+  63,  65,  66,  67,  68,  69,  71,  72,  73,  74,  75,  77,  78,  79,  80,  81,
+  82,  84,  85,  86,  87,  88,  89,  90,  92,  93,  94,  95,  96,  97,  98,  99,
+  100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117,
+  118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+  134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+  150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+  165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 178,
+  179, 180, 181, 182, 183, 184, 185, 185, 186, 187, 188, 189, 190, 191, 192, 192,
+  193, 194, 195, 196, 197, 198, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206,
+  207, 208, 208, 209, 210, 211, 212, 212, 213, 214, 215, 216, 216, 217, 218, 219,
+  220, 220, 221, 222, 223, 224, 224, 225, 226, 227, 228, 228, 229, 230, 231, 231,
+  232, 233, 234, 234, 235, 236, 237, 238, 238, 239, 240, 241, 241, 242, 243, 244,
+  244, 245, 246, 247, 247, 248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255
+};
+
+static const WebRtc_Word16 kPowTableFrac[1024] = {
+  0,    1,    1,    2,    3,    3,    4,    5,
+  6,    6,    7,    8,    8,    9,   10,   10,
+  11,   12,   13,   13,   14,   15,   15,   16,
+  17,   17,   18,   19,   20,   20,   21,   22,
+  22,   23,   24,   25,   25,   26,   27,   27,
+  28,   29,   30,   30,   31,   32,   32,   33,
+  34,   35,   35,   36,   37,   37,   38,   39,
+  40,   40,   41,   42,   42,   43,   44,   45,
+  45,   46,   47,   48,   48,   49,   50,   50,
+  51,   52,   53,   53,   54,   55,   56,   56,
+  57,   58,   58,   59,   60,   61,   61,   62,
+  63,   64,   64,   65,   66,   67,   67,   68,
+  69,   69,   70,   71,   72,   72,   73,   74,
+  75,   75,   76,   77,   78,   78,   79,   80,
+  81,   81,   82,   83,   84,   84,   85,   86,
+  87,   87,   88,   89,   90,   90,   91,   92,
+  93,   93,   94,   95,   96,   96,   97,   98,
+  99,  100,  100,  101,  102,  103,  103,  104,
+  105,  106,  106,  107,  108,  109,  109,  110,
+  111,  112,  113,  113,  114,  115,  116,  116,
+  117,  118,  119,  119,  120,  121,  122,  123,
+  123,  124,  125,  126,  126,  127,  128,  129,
+  130,  130,  131,  132,  133,  133,  134,  135,
+  136,  137,  137,  138,  139,  140,  141,  141,
+  142,  143,  144,  144,  145,  146,  147,  148,
+  148,  149,  150,  151,  152,  152,  153,  154,
+  155,  156,  156,  157,  158,  159,  160,  160,
+  161,  162,  163,  164,  164,  165,  166,  167,
+  168,  168,  169,  170,  171,  172,  173,  173,
+  174,  175,  176,  177,  177,  178,  179,  180,
+  181,  181,  182,  183,  184,  185,  186,  186,
+  187,  188,  189,  190,  190,  191,  192,  193,
+  194,  195,  195,  196,  197,  198,  199,  200,
+  200,  201,  202,  203,  204,  205,  205,  206,
+  207,  208,  209,  210,  210,  211,  212,  213,
+  214,  215,  215,  216,  217,  218,  219,  220,
+  220,  221,  222,  223,  224,  225,  225,  226,
+  227,  228,  229,  230,  231,  231,  232,  233,
+  234,  235,  236,  237,  237,  238,  239,  240,
+  241,  242,  243,  243,  244,  245,  246,  247,
+  248,  249,  249,  250,  251,  252,  253,  254,
+  255,  255,  256,  257,  258,  259,  260,  261,
+  262,  262,  263,  264,  265,  266,  267,  268,
+  268,  269,  270,  271,  272,  273,  274,  275,
+  276,  276,  277,  278,  279,  280,  281,  282,
+  283,  283,  284,  285,  286,  287,  288,  289,
+  290,  291,  291,  292,  293,  294,  295,  296,
+  297,  298,  299,  299,  300,  301,  302,  303,
+  304,  305,  306,  307,  308,  308,  309,  310,
+  311,  312,  313,  314,  315,  316,  317,  318,
+  318,  319,  320,  321,  322,  323,  324,  325,
+  326,  327,  328,  328,  329,  330,  331,  332,
+  333,  334,  335,  336,  337,  338,  339,  339,
+  340,  341,  342,  343,  344,  345,  346,  347,
+  348,  349,  350,  351,  352,  352,  353,  354,
+  355,  356,  357,  358,  359,  360,  361,  362,
+  363,  364,  365,  366,  367,  367,  368,  369,
+  370,  371,  372,  373,  374,  375,  376,  377,
+  378,  379,  380,  381,  382,  383,  384,  385,
+  385,  386,  387,  388,  389,  390,  391,  392,
+  393,  394,  395,  396,  397,  398,  399,  400,
+  401,  402,  403,  404,  405,  406,  407,  408,
+  409,  410,  410,  411,  412,  413,  414,  415,
+  416,  417,  418,  419,  420,  421,  422,  423,
+  424,  425,  426,  427,  428,  429,  430,  431,
+  432,  433,  434,  435,  436,  437,  438,  439,
+  440,  441,  442,  443,  444,  445,  446,  447,
+  448,  449,  450,  451,  452,  453,  454,  455,
+  456,  457,  458,  459,  460,  461,  462,  463,
+  464,  465,  466,  467,  468,  469,  470,  471,
+  472,  473,  474,  475,  476,  477,  478,  479,
+  480,  481,  482,  483,  484,  485,  486,  487,
+  488,  489,  490,  491,  492,  493,  494,  495,
+  496,  498,  499,  500,  501,  502,  503,  504,
+  505,  506,  507,  508,  509,  510,  511,  512,
+  513,  514,  515,  516,  517,  518,  519,  520,
+  521,  522,  523,  525,  526,  527,  528,  529,
+  530,  531,  532,  533,  534,  535,  536,  537,
+  538,  539,  540,  541,  542,  544,  545,  546,
+  547,  548,  549,  550,  551,  552,  553,  554,
+  555,  556,  557,  558,  560,  561,  562,  563,
+  564,  565,  566,  567,  568,  569,  570,  571,
+  572,  574,  575,  576,  577,  578,  579,  580,
+  581,  582,  583,  584,  585,  587,  588,  589,
+  590,  591,  592,  593,  594,  595,  596,  597,
+  599,  600,  601,  602,  603,  604,  605,  606,
+  607,  608,  610,  611,  612,  613,  614,  615,
+  616,  617,  618,  620,  621,  622,  623,  624,
+  625,  626,  627,  628,  630,  631,  632,  633,
+  634,  635,  636,  637,  639,  640,  641,  642,
+  643,  644,  645,  646,  648,  649,  650,  651,
+  652,  653,  654,  656,  657,  658,  659,  660,
+  661,  662,  664,  665,  666,  667,  668,  669,
+  670,  672,  673,  674,  675,  676,  677,  678,
+  680,  681,  682,  683,  684,  685,  687,  688,
+  689,  690,  691,  692,  693,  695,  696,  697,
+  698,  699,  700,  702,  703,  704,  705,  706,
+  708,  709,  710,  711,  712,  713,  715,  716,
+  717,  718,  719,  720,  722,  723,  724,  725,
+  726,  728,  729,  730,  731,  732,  733,  735,
+  736,  737,  738,  739,  741,  742,  743,  744,
+  745,  747,  748,  749,  750,  751,  753,  754,
+  755,  756,  757,  759,  760,  761,  762,  763,
+  765,  766,  767,  768,  770,  771,  772,  773,
+  774,  776,  777,  778,  779,  780,  782,  783,
+  784,  785,  787,  788,  789,  790,  792,  793,
+  794,  795,  796,  798,  799,  800,  801,  803,
+  804,  805,  806,  808,  809,  810,  811,  813,
+  814,  815,  816,  818,  819,  820,  821,  823,
+  824,  825,  826,  828,  829,  830,  831,  833,
+  834,  835,  836,  838,  839,  840,  841,  843,
+  844,  845,  846,  848,  849,  850,  851,  853,
+  854,  855,  857,  858,  859,  860,  862,  863,
+  864,  866,  867,  868,  869,  871,  872,  873,
+  874,  876,  877,  878,  880,  881,  882,  883,
+  885,  886,  887,  889,  890,  891,  893,  894,
+  895,  896,  898,  899,  900,  902,  903,  904,
+  906,  907,  908,  909,  911,  912,  913,  915,
+  916,  917,  919,  920,  921,  923,  924,  925,
+  927,  928,  929,  931,  932,  933,  935,  936,
+  937,  938,  940,  941,  942,  944,  945,  946,
+  948,  949,  950,  952,  953,  955,  956,  957,
+  959,  960,  961,  963,  964,  965,  967,  968,
+  969,  971,  972,  973,  975,  976,  977,  979,
+  980,  981,  983,  984,  986,  987,  988,  990,
+  991,  992,  994,  995,  996,  998,  999, 1001,
+  1002, 1003, 1005, 1006, 1007, 1009, 1010, 1012,
+  1013, 1014, 1016, 1017, 1018, 1020, 1021, 1023
+};
+
+static const WebRtc_Word16 kIndicatorTable[17] = {
+  0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
+  7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
+};
+
+// hybrid Hanning & flat window
+static const WebRtc_Word16 kBlocks80w128x[128] = {
+  0,    536,   1072,   1606,   2139,   2669,   3196,   3720,   4240,   4756,   5266,
+  5771,   6270,   6762,   7246,   7723,   8192,   8652,   9102,   9543,   9974,  10394,
+  10803,  11200,  11585,  11958,  12318,  12665,  12998,  13318,  13623,  13913,  14189,
+  14449,  14694,  14924,  15137,  15334,  15515,  15679,  15826,  15956,  16069,  16165,
+  16244,  16305,  16349,  16375,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16375,  16349,  16305,  16244,  16165,  16069,  15956,
+  15826,  15679,  15515,  15334,  15137,  14924,  14694,  14449,  14189,  13913,  13623,
+  13318,  12998,  12665,  12318,  11958,  11585,  11200,  10803,  10394,   9974,   9543,
+  9102,   8652,   8192,   7723,   7246,   6762,   6270,   5771,   5266,   4756,   4240,
+  3720,   3196,   2669,   2139,   1606,   1072,    536
+};
+
+// hybrid Hanning & flat window
+static const WebRtc_Word16 kBlocks160w256x[256] = {
+  0,   268,   536,   804,  1072,  1339,  1606,  1872,
+  2139,  2404,  2669,  2933,  3196,  3459,  3720,  3981,
+  4240,  4499,  4756,  5012,  5266,  5520,  5771,  6021,
+  6270,  6517,  6762,  7005,  7246,  7486,  7723,  7959,
+  8192,  8423,  8652,  8878,  9102,  9324,  9543,  9760,
+  9974, 10185, 10394, 10600, 10803, 11003, 11200, 11394,
+  11585, 11773, 11958, 12140, 12318, 12493, 12665, 12833,
+  12998, 13160, 13318, 13472, 13623, 13770, 13913, 14053,
+  14189, 14321, 14449, 14574, 14694, 14811, 14924, 15032,
+  15137, 15237, 15334, 15426, 15515, 15599, 15679, 15754,
+  15826, 15893, 15956, 16015, 16069, 16119, 16165, 16207,
+  16244, 16277, 16305, 16329, 16349, 16364, 16375, 16382,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16382, 16375, 16364, 16349, 16329, 16305, 16277,
+  16244, 16207, 16165, 16119, 16069, 16015, 15956, 15893,
+  15826, 15754, 15679, 15599, 15515, 15426, 15334, 15237,
+  15137, 15032, 14924, 14811, 14694, 14574, 14449, 14321,
+  14189, 14053, 13913, 13770, 13623, 13472, 13318, 13160,
+  12998, 12833, 12665, 12493, 12318, 12140, 11958, 11773,
+  11585, 11394, 11200, 11003, 10803, 10600, 10394, 10185,
+  9974,  9760,  9543,  9324,  9102,  8878,  8652,  8423,
+  8192,  7959,  7723,  7486,  7246,  7005,  6762,  6517,
+  6270,  6021,  5771,  5520,  5266,  5012,  4756,  4499,
+  4240,  3981,  3720,  3459,  3196,  2933,  2669,  2404,
+  2139,  1872,  1606,  1339,  1072,   804,   536,   268
+};
+
+// Gain factor1 table: Input value in Q8 and output value in Q13
+// original floating point code
+//  if (gain > blim) {
+//    factor1 = 1.0 + 1.3 * (gain - blim);
+//    if (gain * factor1 > 1.0) {
+//      factor1 = 1.0 / gain;
+//    }
+//  } else {
+//    factor1 = 1.0;
+//  }
+static const WebRtc_Word16 kFactor1Table[257] = {
+  8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8233, 8274, 8315, 8355, 8396, 8436, 8475, 8515, 8554, 8592, 8631, 8669,
+  8707, 8745, 8783, 8820, 8857, 8894, 8931, 8967, 9003, 9039, 9075, 9111, 9146, 9181,
+  9216, 9251, 9286, 9320, 9354, 9388, 9422, 9456, 9489, 9523, 9556, 9589, 9622, 9655,
+  9687, 9719, 9752, 9784, 9816, 9848, 9879, 9911, 9942, 9973, 10004, 10035, 10066,
+  10097, 10128, 10158, 10188, 10218, 10249, 10279, 10308, 10338, 10368, 10397, 10426,
+  10456, 10485, 10514, 10543, 10572, 10600, 10629, 10657, 10686, 10714, 10742, 10770,
+  10798, 10826, 10854, 10882, 10847, 10810, 10774, 10737, 10701, 10666, 10631, 10596,
+  10562, 10527, 10494, 10460, 10427, 10394, 10362, 10329, 10297, 10266, 10235, 10203,
+  10173, 10142, 10112, 10082, 10052, 10023, 9994, 9965, 9936, 9908, 9879, 9851, 9824,
+  9796, 9769, 9742, 9715, 9689, 9662, 9636, 9610, 9584, 9559, 9534, 9508, 9484, 9459,
+  9434, 9410, 9386, 9362, 9338, 9314, 9291, 9268, 9245, 9222, 9199, 9176, 9154, 9132,
+  9110, 9088, 9066, 9044, 9023, 9002, 8980, 8959, 8939, 8918, 8897, 8877, 8857, 8836,
+  8816, 8796, 8777, 8757, 8738, 8718, 8699, 8680, 8661, 8642, 8623, 8605, 8586, 8568,
+  8550, 8532, 8514, 8496, 8478, 8460, 8443, 8425, 8408, 8391, 8373, 8356, 8339, 8323,
+  8306, 8289, 8273, 8256, 8240, 8224, 8208, 8192
+};
+
+// For Factor2 tables
+// original floating point code
+// if (gain > blim) {
+//   factor2 = 1.0;
+// } else {
+//   factor2 = 1.0 - 0.3 * (blim - gain);
+//   if (gain <= inst->denoiseBound) {
+//     factor2 = 1.0 - 0.3 * (blim - inst->denoiseBound);
+//   }
+// }
+//
+// Gain factor table: Input value in Q8 and output value in Q13
+static const WebRtc_Word16 kFactor2Aggressiveness1[257] = {
+  7577, 7577, 7577, 7577, 7577, 7577,
+  7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// Gain factor table: Input value in Q8 and output value in Q13
+static const WebRtc_Word16 kFactor2Aggressiveness2[257] = {
+  7270, 7270, 7270, 7270, 7270, 7306,
+  7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// Gain factor table: Input value in Q8 and output value in Q13
+static const WebRtc_Word16 kFactor2Aggressiveness3[257] = {
+  7184, 7184, 7184, 7229, 7270, 7306,
+  7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// sum of log2(i) from table index to inst->anaLen2 in Q5
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const WebRtc_Word16 kSumLogIndex[66] = {
+  0,  22917,  22917,  22885,  22834,  22770,  22696,  22613,
+  22524,  22428,  22326,  22220,  22109,  21994,  21876,  21754,
+  21629,  21501,  21370,  21237,  21101,  20963,  20822,  20679,
+  20535,  20388,  20239,  20089,  19937,  19783,  19628,  19470,
+  19312,  19152,  18991,  18828,  18664,  18498,  18331,  18164,
+  17994,  17824,  17653,  17480,  17306,  17132,  16956,  16779,
+  16602,  16423,  16243,  16063,  15881,  15699,  15515,  15331,
+  15146,  14960,  14774,  14586,  14398,  14209,  14019,  13829,
+  13637,  13445
+};
+
+// sum of log2(i)^2 from table index to inst->anaLen2 in Q2
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const WebRtc_Word16 kSumSquareLogIndex[66] = {
+  0,  16959,  16959,  16955,  16945,  16929,  16908,  16881,
+  16850,  16814,  16773,  16729,  16681,  16630,  16575,  16517,
+  16456,  16392,  16325,  16256,  16184,  16109,  16032,  15952,
+  15870,  15786,  15700,  15612,  15521,  15429,  15334,  15238,
+  15140,  15040,  14938,  14834,  14729,  14622,  14514,  14404,
+  14292,  14179,  14064,  13947,  13830,  13710,  13590,  13468,
+  13344,  13220,  13094,  12966,  12837,  12707,  12576,  12444,
+  12310,  12175,  12039,  11902,  11763,  11624,  11483,  11341,
+  11198,  11054
+};
+
+// log2(table index) in Q12
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const WebRtc_Word16 kLogIndex[129] = {
+  0,      0,   4096,   6492,   8192,   9511,  10588,  11499,
+  12288,  12984,  13607,  14170,  14684,  15157,  15595,  16003,
+  16384,  16742,  17080,  17400,  17703,  17991,  18266,  18529,
+  18780,  19021,  19253,  19476,  19691,  19898,  20099,  20292,
+  20480,  20662,  20838,  21010,  21176,  21338,  21496,  21649,
+  21799,  21945,  22087,  22226,  22362,  22495,  22625,  22752,
+  22876,  22998,  23117,  23234,  23349,  23462,  23572,  23680,
+  23787,  23892,  23994,  24095,  24195,  24292,  24388,  24483,
+  24576,  24668,  24758,  24847,  24934,  25021,  25106,  25189,
+  25272,  25354,  25434,  25513,  25592,  25669,  25745,  25820,
+  25895,  25968,  26041,  26112,  26183,  26253,  26322,  26390,
+  26458,  26525,  26591,  26656,  26721,  26784,  26848,  26910,
+  26972,  27033,  27094,  27154,  27213,  27272,  27330,  27388,
+  27445,  27502,  27558,  27613,  27668,  27722,  27776,  27830,
+  27883,  27935,  27988,  28039,  28090,  28141,  28191,  28241,
+  28291,  28340,  28388,  28437,  28484,  28532,  28579,  28626,
+  28672
+};
+
+// determinant of estimation matrix in Q0 corresponding to the log2 tables above
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const WebRtc_Word16 kDeterminantEstMatrix[66] = {
+  0,  29814,  25574,  22640,  20351,  18469,  16873,  15491,
+  14277,  13199,  12233,  11362,  10571,   9851,   9192,   8587,
+  8030,   7515,   7038,   6596,   6186,   5804,   5448,   5115,
+  4805,   4514,   4242,   3988,   3749,   3524,   3314,   3116,
+  2930,   2755,   2590,   2435,   2289,   2152,   2022,   1900,
+  1785,   1677,   1575,   1478,   1388,   1302,   1221,   1145,
+  1073,   1005,    942,    881,    825,    771,    721,    674,
+  629,    587,    547,    510,    475,    442,    411,    382,
+  355,    330
+};
+
+// Declare function pointers.
+NoiseEstimation WebRtcNsx_NoiseEstimation;
+PrepareSpectrum WebRtcNsx_PrepareSpectrum;
+SynthesisUpdate WebRtcNsx_SynthesisUpdate;
+AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+Denormalize WebRtcNsx_Denormalize;
+CreateComplexBuffer WebRtcNsx_CreateComplexBuffer;
+
+// Update the noise estimation information.
+static void UpdateNoiseEstimate(NsxInst_t* inst, int offset) {
+  WebRtc_Word32 tmp32no1 = 0;
+  WebRtc_Word32 tmp32no2 = 0;
+  WebRtc_Word16 tmp16 = 0;
+  const WebRtc_Word16 kExp2Const = 11819; // Q13
+
+  int i = 0;
+
+  tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
+                                   inst->magnLen);
+  // Guarantee a Q-domain as high as possible and still fit in int16
+  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   kExp2Const, tmp16, 21);
+  for (i = 0; i < inst->magnLen; i++) {
+    // inst->quantile[i]=exp(inst->lquantile[offset+i]);
+    // in Q21
+    tmp32no2 = WEBRTC_SPL_MUL_16_16(kExp2Const,
+                                    inst->noiseEstLogQuantile[offset + i]);
+    tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+    tmp16 = (WebRtc_Word16) WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21);
+    tmp16 -= 21;// shift 21 to get result in Q0
+    tmp16 += (WebRtc_Word16) inst->qNoise; //shift to get result in Q(qNoise)
+    if (tmp16 < 0) {
+      tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1, -tmp16);
+    } else {
+      tmp32no1 = WEBRTC_SPL_LSHIFT_W32(tmp32no1, tmp16);
+    }
+    inst->noiseEstQuantile[i] = WebRtcSpl_SatW32ToW16(tmp32no1);
+  }
+}
+
+// Noise Estimation
+static void NoiseEstimationC(NsxInst_t* inst,
+                             uint16_t* magn,
+                             uint32_t* noise,
+                             int16_t* q_noise) {
+  WebRtc_Word16 lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
+  WebRtc_Word16 countProd, delta, zeros, frac;
+  WebRtc_Word16 log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
+  const int16_t log2_const = 22713; // Q15
+  const int16_t width_factor = 21845;
+
+  int i, s, offset;
+
+  tabind = inst->stages - inst->normData;
+  assert(tabind < 9);
+  assert(tabind > -9);
+  if (tabind < 0) {
+    logval = -WebRtcNsx_kLogTable[-tabind];
+  } else {
+    logval = WebRtcNsx_kLogTable[tabind];
+  }
+
+  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
+  // magn is in Q(-stages), and the real lmagn values are:
+  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
+  // lmagn in Q8
+  for (i = 0; i < inst->magnLen; i++) {
+    if (magn[i]) {
+      zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]);
+      frac = (WebRtc_Word16)((((WebRtc_UWord32)magn[i] << zeros)
+                              & 0x7FFFFFFF) >> 23);
+      // log2(magn(i))
+      assert(frac < 256);
+      log2 = (WebRtc_Word16)(((31 - zeros) << 8)
+                             + WebRtcNsx_kLogTableFrac[frac]);
+      // log2(magn(i))*log(2)
+      lmagn[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15);
+      // + log(2^stages)
+      lmagn[i] += logval;
+    } else {
+      lmagn[i] = logval;//0;
+    }
+  }
+
+  // loop over simultaneous estimates
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * inst->magnLen;
+
+    // Get counter values from state
+    counter = inst->noiseEstCounter[s];
+    assert(counter < 201);
+    countDiv = WebRtcNsx_kCounterDiv[counter];
+    countProd = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16(counter, countDiv);
+
+    // quant_est(...)
+    for (i = 0; i < inst->magnLen; i++) {
+      // compute delta
+      if (inst->noiseEstDensity[offset + i] > 512) {
+        // Get the value for delta by shifting instead of dividing.
+        int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
+        delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
+      } else {
+        delta = FACTOR_Q7;
+        if (inst->blockIndex < END_STARTUP_LONG) {
+          // Smaller step size during startup. This prevents using
+          // unrealistic values causing overflow.
+          delta = FACTOR_Q7_STARTUP;
+        }
+      }
+
+      // update log quantile estimate
+      tmp16 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+      if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
+        // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
+        // CounterDiv=1/(inst->counter[s]+1) in Q15
+        tmp16 += 2;
+        tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2);
+        inst->noiseEstLogQuantile[offset + i] += tmp16no1;
+      } else {
+        tmp16 += 1;
+        tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1);
+        // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
+        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1);
+        inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
+        if (inst->noiseEstLogQuantile[offset + i] < logval) {
+          // This is the smallest fixed point representation we can
+          // have, hence we limit the output.
+          inst->noiseEstLogQuantile[offset + i] = logval;
+        }
+      }
+
+      // update density estimate
+      if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
+          < WIDTH_Q8) {
+        tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                     inst->noiseEstDensity[offset + i], countProd, 15);
+        tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                     width_factor, countDiv, 15);
+        inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
+      }
+    } // end loop over magnitude spectrum
+
+    if (counter >= END_STARTUP_LONG) {
+      inst->noiseEstCounter[s] = 0;
+      if (inst->blockIndex >= END_STARTUP_LONG) {
+        UpdateNoiseEstimate(inst, offset);
+      }
+    }
+    inst->noiseEstCounter[s]++;
+
+  } // end loop over simultaneous estimates
+
+  // Sequentially update the noise during startup
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    UpdateNoiseEstimate(inst, offset);
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    noise[i] = (WebRtc_UWord32)(inst->noiseEstQuantile[i]); // Q(qNoise)
+  }
+  (*q_noise) = (WebRtc_Word16)inst->qNoise;
+}
+
+// Filter the data in the frequency domain, and create spectrum.
+static void PrepareSpectrumC(NsxInst_t* inst, int16_t* freq_buf) {
+  int i = 0, j = 0;
+  int16_t tmp16 = 0;
+
+  for (i = 0; i < inst->magnLen; i++) {
+    inst->real[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i],
+        (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+    inst->imag[i] = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i],
+        (WebRtc_Word16)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+  }
+
+  freq_buf[0] = inst->real[0];
+  freq_buf[1] = -inst->imag[0];
+  for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+    tmp16 = (inst->anaLen << 1) - j;
+    freq_buf[j] = inst->real[i];
+    freq_buf[j + 1] = -inst->imag[i];
+    freq_buf[tmp16] = inst->real[i];
+    freq_buf[tmp16 + 1] = inst->imag[i];
+  }
+  freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+}
+
+// Denormalize the input buffer.
+static __inline void DenormalizeC(NsxInst_t* inst, int16_t* in, int factor) {
+  int i = 0, j = 0;
+  int32_t tmp32 = 0;
+  for (i = 0, j = 0; i < inst->anaLen; i += 1, j += 2) {
+    tmp32 = WEBRTC_SPL_SHIFT_W32((WebRtc_Word32)in[j],
+                                 factor - inst->normData);
+    inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+  }
+}
+
+// For the noise suppression process, synthesis, read out fully processed
+// segment, and update synthesis buffer.
+static void SynthesisUpdateC(NsxInst_t* inst,
+                             int16_t* out_frame,
+                             int16_t gain_factor) {
+  int i = 0;
+  int16_t tmp16a = 0;
+  int16_t tmp16b = 0;
+  int32_t tmp32 = 0;
+
+  // synthesis
+  for (i = 0; i < inst->anaLen; i++) {
+    tmp16a = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                 inst->window[i], inst->real[i], 14); // Q0, window in Q14
+    tmp32 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16a, gain_factor, 13); // Q0
+    // Down shift with rounding
+    tmp16b = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+    inst->synthesisBuffer[i] = WEBRTC_SPL_ADD_SAT_W16(inst->synthesisBuffer[i],
+                                                      tmp16b); // Q0
+  }
+
+  // read out fully processed segment
+  for (i = 0; i < inst->blockLen10ms; i++) {
+    out_frame[i] = inst->synthesisBuffer[i]; // Q0
+  }
+
+  // update synthesis buffer
+  WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer,
+                        inst->synthesisBuffer + inst->blockLen10ms,
+                        inst->anaLen - inst->blockLen10ms);
+  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
+      + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
+}
+
+// Update analysis buffer for lower band, and window data before FFT.
+static void AnalysisUpdateC(NsxInst_t* inst,
+                            int16_t* out,
+                            int16_t* new_speech) {
+  int k = 0;
+  const int kept = inst->anaLen - inst->blockLen10ms; // samples carried over
+
+  // Slide the analysis buffer left by one 10 ms block, then append the
+  // incoming speech at the tail.
+  WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer,
+                        inst->analysisBuffer + inst->blockLen10ms,
+                        kept);
+  WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer + kept,
+                        new_speech,
+                        inst->blockLen10ms);
+
+  // Apply the analysis window (Q14) with rounding; output remains Q0.
+  for (k = 0; k < inst->anaLen; k++) {
+    out[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+               inst->window[k], inst->analysisBuffer[k], 14); // Q0
+  }
+}
+
+// Create a complex number buffer (out[]) as the input (in[]) interleaved
+// with zeros, and normalize it.
+static __inline void CreateComplexBufferC(NsxInst_t* inst,
+                                          int16_t* in,
+                                          int16_t* out) {
+  int k = 0;
+  int16_t* write_ptr = out;
+
+  for (k = 0; k < inst->anaLen; k++) {
+    // Real part: input scaled up by normData bits, i.e. Q(normData).
+    *write_ptr++ = WEBRTC_SPL_LSHIFT_W16(in[k], inst->normData);
+    // Imaginary part: zero.
+    *write_ptr++ = 0;
+  }
+}
+
+// Compute a parametric (pink-noise model) estimate for one frequency bin:
+// noise_estimate_avg = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(bin)),
+// evaluated in fixed point, output in Q(minNorm-stages).
+// noise_estimate is the same value scaled up by (blockIndex + 1) to undo
+// the block averaging of initMagnEst.
+// NOTE(review): both output pointers are left untouched when the computed
+// exponent is <= 0 — callers appear to be expected to pre-initialize them;
+// confirm at the call sites.
+void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst,
+                                           WebRtc_Word16 pink_noise_exp_avg,
+                                           WebRtc_Word32 pink_noise_num_avg,
+                                           int freq_index,
+                                           WebRtc_UWord32* noise_estimate,
+                                           WebRtc_UWord32* noise_estimate_avg) {
+  WebRtc_Word32 tmp32no1 = 0;
+  WebRtc_Word32 tmp32no2 = 0;
+
+  WebRtc_Word16 int_part = 0;
+  WebRtc_Word16 frac_part = 0;
+
+  // Use pink noise estimate
+  // noise_estimate = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(j))
+  assert(freq_index >= 0);
+  assert(freq_index < 129);
+  tmp32no2 = WEBRTC_SPL_MUL_16_16(pink_noise_exp_avg, kLogIndex[freq_index]); // Q26
+  tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 15); // Q11
+  tmp32no1 = pink_noise_num_avg - tmp32no2; // Q11
+
+  // Calculate output: 2^tmp32no1
+  // Output in Q(minNorm-stages)
+  tmp32no1 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)(inst->minNorm - inst->stages), 11);
+  if (tmp32no1 > 0) {
+    // Split the Q11 exponent into integer and fractional parts.
+    int_part = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 11);
+    frac_part = (WebRtc_Word16)(tmp32no1 & 0x000007ff); // Q11
+    // Piecewise linear approximation of 'b' in
+    // 2^(int_part+frac_part) = 2^int_part * (1 + b)
+    // 'b' is given in Q11 and below stored in frac_part.
+    if (WEBRTC_SPL_RSHIFT_W16(frac_part, 10)) {
+      // Upper fractional part
+      tmp32no2 = WEBRTC_SPL_MUL_16_16(2048 - frac_part, 1244); // Q21
+      tmp32no2 = 2048 - WEBRTC_SPL_RSHIFT_W32(tmp32no2, 10);
+    } else {
+      // Lower fractional part
+      tmp32no2 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(frac_part, 804), 10);
+    }
+    // Shift fractional part to Q(minNorm-stages)
+    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
+    *noise_estimate_avg = WEBRTC_SPL_LSHIFT_U32(1, int_part) + (WebRtc_UWord32)tmp32no2;
+    // Scale up to initMagnEst, which is not block averaged
+    *noise_estimate = (*noise_estimate_avg) * (WebRtc_UWord32)(inst->blockIndex + 1);
+  }
+}
+
+// Initialize state
+// Sets up every field of |inst| for sampling rate |fs| (8000, 16000 or
+// 32000 Hz) and installs the default (C) implementations of the
+// platform-optimizable function pointers, replacing them with NEON
+// versions when available. Returns 0 on success, -1 on a NULL instance
+// or an unsupported sampling rate.
+WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t* inst, WebRtc_UWord32 fs) {
+  int i;
+
+  //check for valid pointer
+  if (inst == NULL) {
+    return -1;
+  }
+  //
+
+  // Initialization of struct
+  if (fs == 8000 || fs == 16000 || fs == 32000) {
+    inst->fs = fs;
+  } else {
+    return -1;
+  }
+
+  // Block length, FFT size/stages, analysis window and LRT limits per
+  // sampling rate. 32 kHz shares the 16 kHz setup (same lower band).
+  if (fs == 8000) {
+    inst->blockLen10ms = 80;
+    inst->anaLen = 128;
+    inst->stages = 7;
+    inst->window = kBlocks80w128x;
+    inst->thresholdLogLrt = 131072; //default threshold for LRT feature
+    inst->maxLrt = 0x0040000;
+    inst->minLrt = 52429;
+  } else if (fs == 16000) {
+    inst->blockLen10ms = 160;
+    inst->anaLen = 256;
+    inst->stages = 8;
+    inst->window = kBlocks160w256x;
+    inst->thresholdLogLrt = 212644; //default threshold for LRT feature
+    inst->maxLrt = 0x0080000;
+    inst->minLrt = 104858;
+  } else if (fs == 32000) {
+    inst->blockLen10ms = 160;
+    inst->anaLen = 256;
+    inst->stages = 8;
+    inst->window = kBlocks160w256x;
+    inst->thresholdLogLrt = 212644; //default threshold for LRT feature
+    inst->maxLrt = 0x0080000;
+    inst->minLrt = 104858;
+  }
+  inst->anaLen2 = WEBRTC_SPL_RSHIFT_W16(inst->anaLen, 1);
+  inst->magnLen = inst->anaLen2 + 1;
+
+  WebRtcSpl_ZerosArrayW16(inst->analysisBuffer, ANAL_BLOCKL_MAX);
+  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer, ANAL_BLOCKL_MAX);
+
+  // for HB processing
+  WebRtcSpl_ZerosArrayW16(inst->dataBufHBFX, ANAL_BLOCKL_MAX);
+  // for quantile noise estimation
+  WebRtcSpl_ZerosArrayW16(inst->noiseEstQuantile, HALF_ANAL_BLOCKL);
+  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
+    inst->noiseEstLogQuantile[i] = 2048; // Q8
+    inst->noiseEstDensity[i] = 153; // Q9
+  }
+  // Stagger the counters of the SIMULT parallel quantile estimators.
+  // Note: the Word16 cast binds to the product, so the division by SIMULT
+  // happens afterwards on the truncated 16-bit value.
+  for (i = 0; i < SIMULT; i++) {
+    inst->noiseEstCounter[i] = (WebRtc_Word16)(END_STARTUP_LONG * (i + 1)) / SIMULT;
+  }
+
+  // Initialize suppression filter with ones
+  WebRtcSpl_MemSetW16((WebRtc_Word16*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL);
+
+  // Set the aggressiveness: default
+  inst->aggrMode = 0;
+
+  //initialize variables for new method
+  inst->priorNonSpeechProb = 8192; // Q14(0.5) prior probability for speech/noise
+  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
+    inst->prevMagnU16[i] = 0;
+    inst->prevNoiseU32[i] = 0; //previous noise-spectrum
+    inst->logLrtTimeAvgW32[i] = 0; //smooth LR ratio
+    inst->avgMagnPause[i] = 0; //conservative noise spectrum estimate
+    inst->initMagnEst[i] = 0; //initial average magnitude spectrum
+  }
+
+  //feature quantities
+  inst->thresholdSpecDiff = 50; //threshold for difference feature: determined on-line
+  inst->thresholdSpecFlat = 20480; //threshold for flatness: determined on-line
+  inst->featureLogLrt = inst->thresholdLogLrt; //average LRT factor (= threshold)
+  inst->featureSpecFlat = inst->thresholdSpecFlat; //spectral flatness (= threshold)
+  inst->featureSpecDiff = inst->thresholdSpecDiff; //spectral difference (= threshold)
+  inst->weightLogLrt = 6; //default weighting par for LRT feature
+  inst->weightSpecFlat = 0; //default weighting par for spectral flatness feature
+  inst->weightSpecDiff = 0; //default weighting par for spectral difference feature
+
+  inst->curAvgMagnEnergy = 0; //window time-average of input magnitude spectrum
+  inst->timeAvgMagnEnergy = 0; //normalization for spectral difference
+  inst->timeAvgMagnEnergyTmp = 0; //normalization for spectral difference
+
+  //histogram quantities: used to estimate/update thresholds for features
+  WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
+  WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
+  WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
+
+  inst->blockIndex = -1; //frame counter
+
+  //inst->modelUpdate    = 500;   //window for update
+  inst->modelUpdate = (1 << STAT_UPDATES); //window for update
+  inst->cntThresUpdate = 0; //counter feature thresholds updates
+
+  inst->sumMagn = 0;
+  inst->magnEnergy = 0;
+  inst->prevQMagn = 0;
+  inst->qNoise = 0;
+  inst->prevQNoise = 0;
+
+  inst->energyIn = 0;
+  inst->scaleEnergyIn = 0;
+
+  inst->whiteNoiseLevel = 0;
+  inst->pinkNoiseNumerator = 0;
+  inst->pinkNoiseExp = 0;
+  inst->minNorm = 15; // Start with full scale
+  inst->zeroInputSignal = 0;
+
+  //default mode
+  WebRtcNsx_set_policy_core(inst, 0);
+
+#ifdef NS_FILEDEBUG
+  inst->infile = fopen("indebug.pcm", "wb");
+  inst->outfile = fopen("outdebug.pcm", "wb");
+  inst->file1 = fopen("file1.pcm", "wb");
+  inst->file2 = fopen("file2.pcm", "wb");
+  inst->file3 = fopen("file3.pcm", "wb");
+  inst->file4 = fopen("file4.pcm", "wb");
+  inst->file5 = fopen("file5.pcm", "wb");
+#endif
+
+  // Initialize function pointers.
+  WebRtcNsx_NoiseEstimation = NoiseEstimationC;
+  WebRtcNsx_PrepareSpectrum = PrepareSpectrumC;
+  WebRtcNsx_SynthesisUpdate = SynthesisUpdateC;
+  WebRtcNsx_AnalysisUpdate = AnalysisUpdateC;
+  WebRtcNsx_Denormalize = DenormalizeC;
+  WebRtcNsx_CreateComplexBuffer = CreateComplexBufferC;
+
+  // Override with NEON implementations when the CPU supports them.
+#ifdef WEBRTC_DETECT_ARM_NEON
+    uint64_t features = WebRtc_GetCPUFeaturesARM();
+    if ((features & kCPUFeatureNEON) != 0)
+    {
+        WebRtcNsx_InitNeon();
+    }
+#elif defined(WEBRTC_ARCH_ARM_NEON)
+    WebRtcNsx_InitNeon();
+#endif
+
+  inst->initFlag = 1;
+
+  return 0;
+}
+
+// Set the suppression aggressiveness (0..3). Higher modes lower the
+// denoising floor, raise the overdrive and enable gain compensation.
+// Returns 0 on success, -1 for an out-of-range mode.
+int WebRtcNsx_set_policy_core(NsxInst_t* inst, int mode) {
+  // allow for modes:0,1,2,3
+  if (mode < 0 || mode > 3) {
+    return -1;
+  }
+
+  inst->aggrMode = mode;
+  switch (mode) {
+    case 0:
+      inst->overdrive = 256; // Q8(1.0)
+      inst->denoiseBound = 8192; // Q14(0.5)
+      inst->gainMap = 0; // No gain compensation
+      break;
+    case 1:
+      inst->overdrive = 256; // Q8(1.0)
+      inst->denoiseBound = 4096; // Q14(0.25)
+      inst->factor2Table = kFactor2Aggressiveness1;
+      inst->gainMap = 1;
+      break;
+    case 2:
+      inst->overdrive = 282; // ~= Q8(1.1)
+      inst->denoiseBound = 2048; // Q14(0.125)
+      inst->factor2Table = kFactor2Aggressiveness2;
+      inst->gainMap = 1;
+      break;
+    default: // mode == 3
+      inst->overdrive = 320; // Q8(1.25)
+      inst->denoiseBound = 1475; // ~= Q14(0.09)
+      inst->factor2Table = kFactor2Aggressiveness3;
+      inst->gainMap = 1;
+      break;
+  }
+  return 0;
+}
+
+// Extract thresholds for feature parameters
+// histograms are computed over some window_size (given by window_pars)
+// thresholds and weights are extracted every window
+// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
+// threshold and weights are returned in: inst->priorModelPars
+void WebRtcNsx_FeatureParameterExtraction(NsxInst_t* inst, int flag) {
+  WebRtc_UWord32 tmpU32;
+  WebRtc_UWord32 histIndex;
+  WebRtc_UWord32 posPeak1SpecFlatFX, posPeak2SpecFlatFX;
+  WebRtc_UWord32 posPeak1SpecDiffFX, posPeak2SpecDiffFX;
+
+  WebRtc_Word32 tmp32;
+  WebRtc_Word32 fluctLrtFX, thresFluctLrtFX;
+  WebRtc_Word32 avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX;
+
+  WebRtc_Word16 j;
+  WebRtc_Word16 numHistLrt;
+
+  int i;
+  int useFeatureSpecFlat, useFeatureSpecDiff, featureSum;
+  int maxPeak1, maxPeak2;
+  int weightPeak1SpecFlat, weightPeak2SpecFlat;
+  int weightPeak1SpecDiff, weightPeak2SpecDiff;
+
+  //update histograms
+  // flag == 0: only accumulate this frame's feature values into the three
+  // histograms; out-of-range values are silently dropped.
+  if (!flag) {
+    // LRT
+    // Type casting to UWord32 is safe since negative values will not be wrapped to larger
+    // values than HIST_PAR_EST
+    histIndex = (WebRtc_UWord32)(inst->featureLogLrt);
+    if (histIndex < HIST_PAR_EST) {
+      inst->histLrt[histIndex]++;
+    }
+    // Spectral flatness
+    // (inst->featureSpecFlat*20)>>10 = (inst->featureSpecFlat*5)>>8
+    histIndex = WEBRTC_SPL_RSHIFT_U32(inst->featureSpecFlat * 5, 8);
+    if (histIndex < HIST_PAR_EST) {
+      inst->histSpecFlat[histIndex]++;
+    }
+    // Spectral difference
+    histIndex = HIST_PAR_EST;
+    if (inst->timeAvgMagnEnergy > 0) {
+      // Guard against division by zero
+      // If timeAvgMagnEnergy == 0 we have no normalizing statistics and
+      // therefore can't update the histogram
+      histIndex = WEBRTC_SPL_UDIV((inst->featureSpecDiff * 5) >> inst->stages,
+                                  inst->timeAvgMagnEnergy);
+    }
+    if (histIndex < HIST_PAR_EST) {
+      inst->histSpecDiff[histIndex]++;
+    }
+  }
+
+  // extract parameters for speech/noise probability
+  // flag == 1: derive new thresholds/weights from the accumulated
+  // histograms, then clear the histograms for the next window.
+  if (flag) {
+    useFeatureSpecDiff = 1;
+    //for LRT feature:
+    // compute the average over inst->featureExtractionParams.rangeAvgHistLrt
+    // Bin centers are the odd numbers (2*i + 1); avgHistLrtFX covers the
+    // low bins, avgHistLrtComplFX the full range.
+    avgHistLrtFX = 0;
+    avgSquareHistLrtFX = 0;
+    numHistLrt = 0;
+    for (i = 0; i < BIN_SIZE_LRT; i++) {
+      j = (2 * i + 1);
+      tmp32 = WEBRTC_SPL_MUL_16_16(inst->histLrt[i], j);
+      avgHistLrtFX += tmp32;
+      numHistLrt += inst->histLrt[i];
+      avgSquareHistLrtFX += WEBRTC_SPL_MUL_32_16(tmp32, j);
+    }
+    avgHistLrtComplFX = avgHistLrtFX;
+    for (; i < HIST_PAR_EST; i++) {
+      j = (2 * i + 1);
+      tmp32 = WEBRTC_SPL_MUL_16_16(inst->histLrt[i], j);
+      avgHistLrtComplFX += tmp32;
+      avgSquareHistLrtFX += WEBRTC_SPL_MUL_32_16(tmp32, j);
+    }
+    // Fluctuation of the LRT histogram:
+    // N * sum(x^2) - sum_low(x) * sum_all(x), small when the feature is
+    // nearly constant (noise-only input).
+    fluctLrtFX = WEBRTC_SPL_MUL(avgSquareHistLrtFX, numHistLrt);
+    fluctLrtFX -= WEBRTC_SPL_MUL(avgHistLrtFX, avgHistLrtComplFX);
+    thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt;
+    // get threshold for LRT feature:
+    tmpU32 = (FACTOR_1_LRT_DIFF * (WebRtc_UWord32)avgHistLrtFX);
+    if ((fluctLrtFX < thresFluctLrtFX) || (numHistLrt == 0) ||
+        (tmpU32 > (WebRtc_UWord32)(100 * numHistLrt))) {
+      //very low fluctuation, so likely noise
+      inst->thresholdLogLrt = inst->maxLrt;
+    } else {
+      tmp32 = (WebRtc_Word32)((tmpU32 << (9 + inst->stages)) / numHistLrt /
+                              25);
+      // check if value is within min/max range
+      inst->thresholdLogLrt = WEBRTC_SPL_SAT(inst->maxLrt,
+                                             tmp32,
+                                             inst->minLrt);
+    }
+    if (fluctLrtFX < thresFluctLrtFX) {
+      // Do not use difference feature if fluctuation of LRT feature is very low:
+      // most likely just noise state
+      useFeatureSpecDiff = 0;
+    }
+
+    // for spectral flatness and spectral difference: compute the main peaks of histogram
+    maxPeak1 = 0;
+    maxPeak2 = 0;
+    posPeak1SpecFlatFX = 0;
+    posPeak2SpecFlatFX = 0;
+    weightPeak1SpecFlat = 0;
+    weightPeak2SpecFlat = 0;
+
+    // peaks for flatness
+    // Track the two largest histogram bins; positions use the 2*i+1 bin
+    // center convention.
+    for (i = 0; i < HIST_PAR_EST; i++) {
+      if (inst->histSpecFlat[i] > maxPeak1) {
+        // Found new "first" peak
+        maxPeak2 = maxPeak1;
+        weightPeak2SpecFlat = weightPeak1SpecFlat;
+        posPeak2SpecFlatFX = posPeak1SpecFlatFX;
+
+        maxPeak1 = inst->histSpecFlat[i];
+        weightPeak1SpecFlat = inst->histSpecFlat[i];
+        posPeak1SpecFlatFX = (WebRtc_UWord32)(2 * i + 1);
+      } else if (inst->histSpecFlat[i] > maxPeak2) {
+        // Found new "second" peak
+        maxPeak2 = inst->histSpecFlat[i];
+        weightPeak2SpecFlat = inst->histSpecFlat[i];
+        posPeak2SpecFlatFX = (WebRtc_UWord32)(2 * i + 1);
+      }
+    }
+
+    // for spectral flatness feature
+    useFeatureSpecFlat = 1;
+    // merge the two peaks if they are close
+    if ((posPeak1SpecFlatFX - posPeak2SpecFlatFX < LIM_PEAK_SPACE_FLAT_DIFF)
+        && (weightPeak2SpecFlat * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecFlat)) {
+      weightPeak1SpecFlat += weightPeak2SpecFlat;
+      posPeak1SpecFlatFX = (posPeak1SpecFlatFX + posPeak2SpecFlatFX) >> 1;
+    }
+    //reject if weight of peaks is not large enough, or peak value too small
+    if (weightPeak1SpecFlat < THRES_WEIGHT_FLAT_DIFF || posPeak1SpecFlatFX
+        < THRES_PEAK_FLAT) {
+      useFeatureSpecFlat = 0;
+    } else { // if selected, get the threshold
+      // compute the threshold and check if value is within min/max range
+      inst->thresholdSpecFlat = WEBRTC_SPL_SAT(MAX_FLAT_Q10, FACTOR_2_FLAT_Q10
+                                               * posPeak1SpecFlatFX, MIN_FLAT_Q10); //Q10
+    }
+    // done with flatness feature
+
+    if (useFeatureSpecDiff) {
+      //compute two peaks for spectral difference
+      maxPeak1 = 0;
+      maxPeak2 = 0;
+      posPeak1SpecDiffFX = 0;
+      posPeak2SpecDiffFX = 0;
+      weightPeak1SpecDiff = 0;
+      weightPeak2SpecDiff = 0;
+      // peaks for spectral difference
+      for (i = 0; i < HIST_PAR_EST; i++) {
+        if (inst->histSpecDiff[i] > maxPeak1) {
+          // Found new "first" peak
+          maxPeak2 = maxPeak1;
+          weightPeak2SpecDiff = weightPeak1SpecDiff;
+          posPeak2SpecDiffFX = posPeak1SpecDiffFX;
+
+          maxPeak1 = inst->histSpecDiff[i];
+          weightPeak1SpecDiff = inst->histSpecDiff[i];
+          posPeak1SpecDiffFX = (WebRtc_UWord32)(2 * i + 1);
+        } else if (inst->histSpecDiff[i] > maxPeak2) {
+          // Found new "second" peak
+          maxPeak2 = inst->histSpecDiff[i];
+          weightPeak2SpecDiff = inst->histSpecDiff[i];
+          posPeak2SpecDiffFX = (WebRtc_UWord32)(2 * i + 1);
+        }
+      }
+
+      // merge the two peaks if they are close
+      if ((posPeak1SpecDiffFX - posPeak2SpecDiffFX < LIM_PEAK_SPACE_FLAT_DIFF)
+          && (weightPeak2SpecDiff * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecDiff)) {
+        weightPeak1SpecDiff += weightPeak2SpecDiff;
+        posPeak1SpecDiffFX = (posPeak1SpecDiffFX + posPeak2SpecDiffFX) >> 1;
+      }
+      // get the threshold value and check if value is within min/max range
+      inst->thresholdSpecDiff = WEBRTC_SPL_SAT(MAX_DIFF, FACTOR_1_LRT_DIFF
+                                               * posPeak1SpecDiffFX, MIN_DIFF); //5x bigger
+      //reject if weight of peaks is not large enough
+      if (weightPeak1SpecDiff < THRES_WEIGHT_FLAT_DIFF) {
+        useFeatureSpecDiff = 0;
+      }
+      // done with spectral difference feature
+    }
+
+    // select the weights between the features
+    // inst->priorModelPars[4] is weight for LRT: always selected
+    // Weights split a total of 6 evenly over the enabled features.
+    featureSum = 6 / (1 + useFeatureSpecFlat + useFeatureSpecDiff);
+    inst->weightLogLrt = featureSum;
+    inst->weightSpecFlat = useFeatureSpecFlat * featureSum;
+    inst->weightSpecDiff = useFeatureSpecDiff * featureSum;
+
+    // set histograms to zero for next update
+    WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
+    WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
+    WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
+  } // end of flag == 1
+}
+
+
+// Compute spectral flatness on input spectrum
+// magn is the magnitude spectrum
+// spectral flatness is returned in inst->featureSpecFlat
+// Flatness = geometric mean / arithmetic mean of the spectrum, computed
+// in the log2 domain and exponentially smoothed with SPECT_FLAT_TAVG_Q14.
+void WebRtcNsx_ComputeSpectralFlatness(NsxInst_t* inst, WebRtc_UWord16* magn) {
+  WebRtc_UWord32 tmpU32;
+  WebRtc_UWord32 avgSpectralFlatnessNum, avgSpectralFlatnessDen;
+
+  WebRtc_Word32 tmp32;
+  WebRtc_Word32 currentSpectralFlatness, logCurSpectralFlatness;
+
+  WebRtc_Word16 zeros, frac, intPart;
+
+  int i;
+
+  // for flatness
+  avgSpectralFlatnessNum = 0;
+  avgSpectralFlatnessDen = inst->sumMagn - (WebRtc_UWord32)magn[0]; // Q(normData-stages)
+
+  // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
+  // flatness = exp( sum(log(magn[i]))/N - log(sum(magn[i])/N) )
+  //          = exp( sum(log(magn[i]))/N ) * N / sum(magn[i])
+  //          = 2^( sum(log2(magn[i]))/N - (log2(sum(magn[i])) - log2(N)) ) [This is used]
+  for (i = 1; i < inst->magnLen; i++) {
+    // First bin is excluded from spectrum measures. Number of bins is now a power of 2
+    if (magn[i]) {
+      // log2(magn[i]) in Q8: integer part from the leading-zero count,
+      // fractional part from the 256-entry lookup table.
+      zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magn[i]);
+      frac = (WebRtc_Word16)(((WebRtc_UWord32)((WebRtc_UWord32)(magn[i]) << zeros)
+                              & 0x7FFFFFFF) >> 23);
+      // log2(magn(i))
+      assert(frac < 256);
+      tmpU32 = (WebRtc_UWord32)(((31 - zeros) << 8)
+                                + WebRtcNsx_kLogTableFrac[frac]); // Q8
+      avgSpectralFlatnessNum += tmpU32; // Q8
+    } else {
+      //if at least one frequency component is zero, treat separately
+      // Decay the smoothed feature towards zero and skip this frame.
+      tmpU32 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecFlat, SPECT_FLAT_TAVG_Q14); // Q24
+      inst->featureSpecFlat -= WEBRTC_SPL_RSHIFT_U32(tmpU32, 14); // Q10
+      return;
+    }
+  }
+  //ratio and inverse log: check for case of log(0)
+  zeros = WebRtcSpl_NormU32(avgSpectralFlatnessDen);
+  frac = (WebRtc_Word16)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23);
+  // log2(avgSpectralFlatnessDen)
+  assert(frac < 256);
+  tmp32 = (WebRtc_Word32)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8
+  logCurSpectralFlatness = (WebRtc_Word32)avgSpectralFlatnessNum;
+  logCurSpectralFlatness += ((WebRtc_Word32)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1)
+  logCurSpectralFlatness -= (tmp32 << (inst->stages - 1));
+  logCurSpectralFlatness = WEBRTC_SPL_LSHIFT_W32(logCurSpectralFlatness, 10 - inst->stages); // Q17
+  // Inverse log: 2^logCurSpectralFlatness via mantissa trick — 0x00020000
+  // provides the implicit leading one above the 17 fractional bits.
+  tmp32 = (WebRtc_Word32)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness)
+                                        & 0x0001FFFF)); //Q17
+  intPart = -(WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(logCurSpectralFlatness, 17);
+  intPart += 7; // Shift 7 to get the output in Q10 (from Q17 = -17+10)
+  if (intPart > 0) {
+    currentSpectralFlatness = WEBRTC_SPL_RSHIFT_W32(tmp32, intPart);
+  } else {
+    currentSpectralFlatness = WEBRTC_SPL_LSHIFT_W32(tmp32, -intPart);
+  }
+
+  //time average update of spectral flatness feature
+  tmp32 = currentSpectralFlatness - (WebRtc_Word32)inst->featureSpecFlat; // Q10
+  tmp32 = WEBRTC_SPL_MUL_32_16(SPECT_FLAT_TAVG_Q14, tmp32); // Q24
+  inst->featureSpecFlat = (WebRtc_UWord32)((WebRtc_Word32)inst->featureSpecFlat
+                                           + WEBRTC_SPL_RSHIFT_W32(tmp32, 14)); // Q10
+  // done with flatness feature
+}
+
+
+// Compute the difference measure between input spectrum and a template/learned noise spectrum
+// magn_tmp is the input spectrum
+// the reference/template spectrum is  inst->magn_avg_pause[i]
+// returns (normalized) spectral difference in inst->featureSpecDiff
+void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, WebRtc_UWord16* magnIn) {
+  // This is to be calculated:
+  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
+
+  WebRtc_UWord32 tmpU32no1, tmpU32no2;
+  WebRtc_UWord32 varMagnUFX, varPauseUFX, avgDiffNormMagnUFX;
+
+  WebRtc_Word32 tmp32no1, tmp32no2;
+  WebRtc_Word32 avgPauseFX, avgMagnFX, covMagnPauseFX;
+  WebRtc_Word32 maxPause, minPause;
+
+  WebRtc_Word16 tmp16no1;
+
+  int i, norm32, nShifts;
+
+  avgPauseFX = 0;
+  maxPause = 0;
+  minPause = inst->avgMagnPause[0]; // Q(prevQMagn)
+  // compute average quantities
+  // Also track the min/max of the pause spectrum to bound the deviation
+  // used for the overflow guard below.
+  for (i = 0; i < inst->magnLen; i++) {
+    // Compute mean of magn_pause
+    avgPauseFX += inst->avgMagnPause[i]; // in Q(prevQMagn)
+    maxPause = WEBRTC_SPL_MAX(maxPause, inst->avgMagnPause[i]);
+    minPause = WEBRTC_SPL_MIN(minPause, inst->avgMagnPause[i]);
+  }
+  // normalize by replacing div of "inst->magnLen" with "inst->stages-1" shifts
+  avgPauseFX = WEBRTC_SPL_RSHIFT_W32(avgPauseFX, inst->stages - 1);
+  avgMagnFX = (WebRtc_Word32)WEBRTC_SPL_RSHIFT_U32(inst->sumMagn, inst->stages - 1);
+  // Largest possible deviation in magnPause for (co)var calculations
+  tmp32no1 = WEBRTC_SPL_MAX(maxPause - avgPauseFX, avgPauseFX - minPause);
+  // Get number of shifts to make sure we don't get wrap around in varPause
+  nShifts = WEBRTC_SPL_MAX(0, 10 + inst->stages - WebRtcSpl_NormW32(tmp32no1));
+
+  // Accumulate var(magn), var(pause) and cov(magn, pause) around the means;
+  // the pause terms are pre-shifted by nShifts to avoid overflow.
+  varMagnUFX = 0;
+  varPauseUFX = 0;
+  covMagnPauseFX = 0;
+  for (i = 0; i < inst->magnLen; i++) {
+    // Compute var and cov of magn and magn_pause
+    tmp16no1 = (WebRtc_Word16)((WebRtc_Word32)magnIn[i] - avgMagnFX);
+    tmp32no2 = inst->avgMagnPause[i] - avgPauseFX;
+    varMagnUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(tmp16no1, tmp16no1); // Q(2*qMagn)
+    tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no2, tmp16no1); // Q(prevQMagn+qMagn)
+    covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn)
+    tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, nShifts); // Q(prevQMagn-minPause)
+    varPauseUFX += (WebRtc_UWord32)WEBRTC_SPL_MUL(tmp32no1, tmp32no1); // Q(2*(prevQMagn-minPause))
+  }
+  //update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts
+  inst->curAvgMagnEnergy += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy, 2 * inst->normData
+                                                  + inst->stages - 1);
+
+  // avgDiffNormMagn = var(magn) - cov^2 / var(pause); the subtraction is
+  // only performed when both cov and var(pause) are non-zero.
+  avgDiffNormMagnUFX = varMagnUFX; // Q(2*qMagn)
+  if ((varPauseUFX) && (covMagnPauseFX)) {
+    tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
+    norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
+    if (norm32 > 0) {
+      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32); // Q(prevQMagn+qMagn+norm32)
+    } else {
+      tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, -norm32); // Q(prevQMagn+qMagn+norm32)
+    }
+    tmpU32no2 = WEBRTC_SPL_UMUL(tmpU32no1, tmpU32no1); // Q(2*(prevQMagn+qMagn-norm32))
+
+    nShifts += norm32;
+    nShifts <<= 1;
+    if (nShifts < 0) {
+      varPauseUFX >>= (-nShifts); // Q(2*(qMagn+norm32+minPause))
+      nShifts = 0;
+    }
+    if (varPauseUFX > 0) {
+      // Q(2*(qMagn+norm32-16+minPause))
+      tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no2, varPauseUFX);
+      tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, nShifts);
+
+      // Q(2*qMagn)
+      avgDiffNormMagnUFX -= WEBRTC_SPL_MIN(avgDiffNormMagnUFX, tmpU32no1);
+    } else {
+      avgDiffNormMagnUFX = 0;
+    }
+  }
+  //normalize and compute time average update of difference feature
+  // Symmetric exponential smoothing (SPECT_DIFF_TAVG_Q8) done on unsigned
+  // values, hence the two branches for the sign of the update.
+  tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(avgDiffNormMagnUFX, 2 * inst->normData);
+  if (inst->featureSpecDiff > tmpU32no1) {
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecDiff - tmpU32no1,
+                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
+    inst->featureSpecDiff -= WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 8); // Q(-2*stages)
+  } else {
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no1 - inst->featureSpecDiff,
+                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
+    inst->featureSpecDiff += WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 8); // Q(-2*stages)
+  }
+}
+
+// Compute speech/noise probability
+// speech/noise probability is returned in: probSpeechFinal
+//snrLocPrior is the prior SNR for each frequency (in Q11)
+//snrLocPost is the post SNR for each frequency (in Q11)
+void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, WebRtc_UWord16* nonSpeechProbFinal,
+                               WebRtc_UWord32* priorLocSnr, WebRtc_UWord32* postLocSnr) {
+  WebRtc_UWord32 zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
+
+  WebRtc_Word32 invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
+  WebRtc_Word32 frac32, logTmp;
+  WebRtc_Word32 logLrtTimeAvgKsumFX;
+
+  WebRtc_Word16 indPriorFX16;
+  WebRtc_Word16 tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
+
+  int i, normTmp, normTmp2, nShifts;
+
+  // compute feature based on average LR factor
+  // this is the average over all frequencies of the smooth log LRT
+  logLrtTimeAvgKsumFX = 0;
+  for (i = 0; i < inst->magnLen; i++) {
+    besselTmpFX32 = (WebRtc_Word32)postLocSnr[i]; // Q11
+    normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
+    num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
+    if (normTmp > 10) {
+      den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
+    } else {
+      den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
+    }
+    if (den > 0) {
+      besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
+    } else {
+      besselTmpFX32 -= num; // Q11
+    }
+
+    // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior) - inst->logLrtTimeAvg[i]);
+    // Here, LRT_TAVG = 0.5
+    zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
+    frac32 = (WebRtc_Word32)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
+    tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
+    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
+    tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)frac32, 5412, 12);
+    frac32 = tmp32 + 37;
+    // tmp32 = log2(priorLocSnr[i])
+    tmp32 = (WebRtc_Word32)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
+    logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8); // log2(priorLocSnr[i])*log(2)
+    tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1); // Q12
+    inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12
+
+    logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
+  }
+  inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5, inst->stages + 10); // 5 = BIN_SIZE_LRT / 2
+  // done with computation of LR factor
+
+  //
+  //compute the indicator functions
+  //
+
+  // average LRT feature
+  // FLOAT code
+  // indicator0 = 0.5 * (tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
+  tmpIndFX = 16384; // Q14(1.0)
+  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
+  nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
+  //use larger width in tanh map for pause regions
+  if (tmp32no1 < 0) {
+    tmpIndFX = 0;
+    tmp32no1 = -tmp32no1;
+    //widthPrior = widthPrior * 2.0;
+    nShifts++;
+  }
+  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
+  // compute indicator function: sigmoid map
+  tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
+  if ((tableIndex < 16) && (tableIndex >= 0)) {
+    tmp16no2 = kIndicatorTable[tableIndex];
+    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+    frac = (WebRtc_Word16)(tmp32no1 & 0x00003fff); // Q14
+    tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+    if (tmpIndFX == 0) {
+      tmpIndFX = 8192 - tmp16no2; // Q14
+    } else {
+      tmpIndFX = 8192 + tmp16no2; // Q14
+    }
+  }
+  indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14
+
+  //spectral flatness feature
+  if (inst->weightSpecFlat) {
+    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
+    nShifts = 4;
+    if (inst->thresholdSpecFlat < tmpU32no1) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
+      //widthPrior = widthPrior * 2.0;
+      nShifts++;
+    }
+    tmp32no1 = (WebRtc_Word32)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
+                                                                        nShifts), 25); //Q14
+    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts), 25); //Q14
+    // compute indicator function: sigmoid map
+    // FLOAT code
+    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) + 1.0);
+    tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+    if (tableIndex < 16) {
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2; // Q14
+      } else {
+        tmpIndFX = 8192 - tmp16no2; // Q14
+      }
+    }
+    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
+  }
+
+  //for template spectral-difference
+  if (inst->weightSpecDiff) {
+    tmpU32no1 = 0;
+    if (inst->featureSpecDiff) {
+      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
+                               WebRtcSpl_NormU32(inst->featureSpecDiff));
+      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp); // Q(normTmp-2*stages)
+      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy, 20 - inst->stages
+                                        - normTmp);
+      if (tmpU32no2 > 0) {
+        // Q(20 - inst->stages)
+        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
+      } else {
+        tmpU32no1 = (WebRtc_UWord32)(0x7fffffff);
+      }
+    }
+    tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff, 17), 25);
+    tmpU32no2 = tmpU32no1 - tmpU32no3;
+    nShifts = 1;
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    if (tmpU32no2 & 0x80000000) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no3 - tmpU32no1;
+      //widthPrior = widthPrior * 2.0;
+      nShifts--;
+    }
+    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
+    // compute indicator function: sigmoid map
+    /* FLOAT code
+     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
+     */
+    tableIndex = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+    if (tableIndex < 16) {
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (WebRtc_Word16)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                    tmp16no1, frac, 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2;
+      } else {
+        tmpIndFX = 8192 - tmp16no2;
+      }
+    }
+    indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
+  }
+
+  //combine the indicator function with the feature weights
+  // FLOAT code
+  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2 * indicator2);
+  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
+  // done with computing indicator function
+
+  //compute the prior probability
+  // FLOAT code
+  // inst->priorNonSpeechProb += PRIOR_UPDATE * (indPriorNonSpeech - inst->priorNonSpeechProb);
+  tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
+  inst->priorNonSpeechProb += (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(
+                                PRIOR_UPDATE_Q14, tmp16, 14); // Q14
+
+  //final speech probability: combine prior model with LR factor:
+
+  memset(nonSpeechProbFinal, 0, sizeof(WebRtc_UWord16) * inst->magnLen);
+
+  if (inst->priorNonSpeechProb > 0) {
+    for (i = 0; i < inst->magnLen; i++) {
+      // FLOAT code
+      // invLrt = exp(inst->logLrtTimeAvg[i]);
+      // invLrt = inst->priorSpeechProb * invLrt;
+      // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) / (1.0 - inst->priorSpeechProb + invLrt);
+      // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
+      // nonSpeechProbFinal[i] = inst->priorNonSpeechProb / (inst->priorNonSpeechProb + invLrt);
+      if (inst->logLrtTimeAvgW32[i] < 65300) {
+        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(inst->logLrtTimeAvgW32[i], 23637),
+                                         14); // Q12
+        intPart = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
+        if (intPart < -8) {
+          intPart = -8;
+        }
+        frac = (WebRtc_Word16)(tmp32no1 & 0x00000fff); // Q12
+
+        // Quadratic approximation of 2^frac
+        tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
+        tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
+        invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
+                   + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8
+
+        normTmp = WebRtcSpl_NormW32(invLrtFX);
+        normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
+        if (normTmp + normTmp2 >= 7) {
+          if (normTmp + normTmp2 < 15) {
+            invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp);
+            // Q(normTmp+normTmp2-7)
+            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb));
+            // Q(normTmp+normTmp2+7)
+            invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2); // Q14
+          } else {
+            tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb)); // Q22
+            invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
+          }
+
+          tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)inst->priorNonSpeechProb, 8); // Q22
+
+          nonSpeechProbFinal[i] = (WebRtc_UWord16)WEBRTC_SPL_DIV(tmp32no1,
+              (WebRtc_Word32)inst->priorNonSpeechProb + invLrtFX); // Q8
+        }
+      }
+    }
+  }
+}
+
+// Transform input (speechFrame) to frequency domain magnitude (magnU16).
+//
+// Besides producing the magnitude spectrum this routine also:
+//  - windows the input and updates the analysis buffer (WebRtcNsx_AnalysisUpdate),
+//  - computes the input energy (inst->energyIn) and the normalization factor
+//    (inst->normData) of the windowed data,
+//  - flags an all-zero frame via inst->zeroInputSignal and returns early, and
+//  - accumulates inst->magnEnergy and inst->sumMagn; during the startup phase
+//    (blockIndex < END_STARTUP_SHORT) it additionally collects statistics for
+//    the parametric white/pink background-noise model (initMagnEst,
+//    whiteNoiseLevel, pinkNoiseNumerator, pinkNoiseExp).
+//
+// Q-domains: magnU16[] is produced in Q(normData-stages); initMagnEst[] and
+// whiteNoiseLevel are maintained in Q(minNorm-stages).
+void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, WebRtc_UWord16* magnU16) {
+
+  WebRtc_UWord32 tmpU32no1, tmpU32no2;
+
+  WebRtc_Word32   tmp_1_w32 = 0;
+  WebRtc_Word32   tmp_2_w32 = 0;
+  WebRtc_Word32   sum_log_magn = 0;
+  WebRtc_Word32   sum_log_i_log_magn = 0;
+
+  WebRtc_UWord16  sum_log_magn_u16 = 0;
+  WebRtc_UWord16  tmp_u16 = 0;
+
+  WebRtc_Word16   sum_log_i = 0;
+  WebRtc_Word16   sum_log_i_square = 0;
+  WebRtc_Word16   frac = 0;
+  WebRtc_Word16   log2 = 0;
+  WebRtc_Word16   matrix_determinant = 0;
+  WebRtc_Word16   winData[ANAL_BLOCKL_MAX], maxWinData;
+  WebRtc_Word16   realImag[ANAL_BLOCKL_MAX << 1];
+
+  int i, j;
+  int zeros;
+  int net_norm = 0;
+  int right_shifts_in_magnU16 = 0;
+  int right_shifts_in_initMagnEst = 0;
+
+  // Update analysis buffer for lower band, and window data before FFT.
+  WebRtcNsx_AnalysisUpdate(inst, winData, speechFrame);
+
+  // Get input energy
+  inst->energyIn = WebRtcSpl_Energy(winData, (int)inst->anaLen, &(inst->scaleEnergyIn));
+
+  // Reset zero input flag
+  inst->zeroInputSignal = 0;
+  // Acquire norm for winData
+  maxWinData = WebRtcSpl_MaxAbsValueW16(winData, inst->anaLen);
+  inst->normData = WebRtcSpl_NormW16(maxWinData);
+  if (maxWinData == 0) {
+    // Treat zero input separately: skip the FFT entirely and let
+    // WebRtcNsx_DataSynthesis drain the synthesis buffer instead.
+    inst->zeroInputSignal = 1;
+    return;
+  }
+
+  // Determine the net normalization in the frequency domain
+  net_norm = inst->stages - inst->normData;
+  // Track lowest normalization factor and use it to prevent wrap around in shifting.
+  // If the current frame is normalized less than any previous one (minNorm),
+  // the accumulated startup estimates must be right-shifted instead.
+  right_shifts_in_magnU16 = inst->normData - inst->minNorm;
+  right_shifts_in_initMagnEst = WEBRTC_SPL_MAX(-right_shifts_in_magnU16, 0);
+  inst->minNorm -= right_shifts_in_initMagnEst;
+  right_shifts_in_magnU16 = WEBRTC_SPL_MAX(right_shifts_in_magnU16, 0);
+
+  // create realImag as winData interleaved with zeros (= imag. part), normalize it
+  WebRtcNsx_CreateComplexBuffer(inst, winData, realImag);
+
+  // bit-reverse position of elements in array and FFT the array
+  WebRtcSpl_ComplexBitReverse(realImag, inst->stages); // Q(normData-stages)
+  WebRtcSpl_ComplexFFT(realImag, inst->stages, 1);
+
+  // Extract DC (bin 0) and Nyquist (bin anaLen2) components; their imaginary
+  // parts are forced to zero (purely real for a real-valued input frame).
+  inst->imag[0] = 0; // Q(normData-stages)
+  inst->imag[inst->anaLen2] = 0;
+  inst->real[0] = realImag[0]; // Q(normData-stages)
+  inst->real[inst->anaLen2] = realImag[inst->anaLen];
+  // Q(2*(normData-stages))
+  inst->magnEnergy = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[0], inst->real[0]);
+  inst->magnEnergy += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(inst->real[inst->anaLen2],
+                                                           inst->real[inst->anaLen2]);
+  magnU16[0] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages)
+  magnU16[inst->anaLen2] = (WebRtc_UWord16)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]);
+  inst->sumMagn = (WebRtc_UWord32)magnU16[0]; // Q(normData-stages)
+  inst->sumMagn += (WebRtc_UWord32)magnU16[inst->anaLen2];
+
+  if (inst->blockIndex >= END_STARTUP_SHORT) {
+    // Past startup: only magnitude/energy accumulation is needed.
+    for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+      // NOTE(review): imag is stored sign-flipped (complex conjugate of the
+      // FFT output) — presumably undone in the synthesis path; confirm there.
+      inst->real[i] = realImag[j];
+      inst->imag[i] = -realImag[j + 1];
+      // magnitude spectrum
+      // energy in Q(2*(normData-stages))
+      tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j], realImag[j]);
+      tmpU32no1 += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j + 1], realImag[j + 1]);
+      inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages))
+
+      magnU16[i] = (WebRtc_UWord16)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages)
+      inst->sumMagn += (WebRtc_UWord32)magnU16[i]; // Q(normData-stages)
+    }
+  } else {
+    //
+    // Gather information during startup for noise parameter estimation
+    //
+
+    // Switch initMagnEst to Q(minNorm-stages)
+    inst->initMagnEst[0] = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[0],
+                                                 right_shifts_in_initMagnEst);
+    inst->initMagnEst[inst->anaLen2] =
+      WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[inst->anaLen2],
+                            right_shifts_in_initMagnEst); // Q(minNorm-stages)
+
+    // Shift magnU16 to same domain as initMagnEst
+    tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[0],
+                                      right_shifts_in_magnU16); // Q(minNorm-stages)
+    tmpU32no2 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[inst->anaLen2],
+                                      right_shifts_in_magnU16); // Q(minNorm-stages)
+
+    // Update initMagnEst
+    inst->initMagnEst[0] += tmpU32no1; // Q(minNorm-stages)
+    inst->initMagnEst[inst->anaLen2] += tmpU32no2; // Q(minNorm-stages)
+
+    log2 = 0;
+    if (magnU16[inst->anaLen2]) {
+      // Calculate log2(magnU16[inst->anaLen2]): integer part from the bit
+      // position (31 - zeros), fractional part from an 8-bit lookup table.
+      zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[inst->anaLen2]);
+      frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[inst->anaLen2] << zeros) &
+                              0x7FFFFFFF) >> 23); // Q8
+      // log2(magnU16(i)) in Q8
+      assert(frac < 256);
+      log2 = (WebRtc_Word16)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]);
+    }
+
+    sum_log_magn = (WebRtc_Word32)log2; // Q8
+    // sum_log_i_log_magn in Q17
+    sum_log_i_log_magn = (WEBRTC_SPL_MUL_16_16(kLogIndex[inst->anaLen2], log2) >> 3);
+
+    for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+      // Same conjugate storage as in the non-startup branch above.
+      inst->real[i] = realImag[j];
+      inst->imag[i] = -realImag[j + 1];
+      // magnitude spectrum
+      // energy in Q(2*(normData-stages))
+      tmpU32no1 = (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j], realImag[j]);
+      tmpU32no1 += (WebRtc_UWord32)WEBRTC_SPL_MUL_16_16(realImag[j + 1], realImag[j + 1]);
+      inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages))
+
+      magnU16[i] = (WebRtc_UWord16)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages)
+      inst->sumMagn += (WebRtc_UWord32)magnU16[i]; // Q(normData-stages)
+
+      // Switch initMagnEst to Q(minNorm-stages)
+      inst->initMagnEst[i] = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i],
+                                                   right_shifts_in_initMagnEst);
+
+      // Shift magnU16 to same domain as initMagnEst, i.e., Q(minNorm-stages)
+      tmpU32no1 = WEBRTC_SPL_RSHIFT_W32((WebRtc_UWord32)magnU16[i],
+                                        right_shifts_in_magnU16);
+      // Update initMagnEst
+      inst->initMagnEst[i] += tmpU32no1; // Q(minNorm-stages)
+
+      if (i >= kStartBand) {
+        // For pink noise estimation. Collect data neglecting lower frequency band
+        log2 = 0;
+        if (magnU16[i]) {
+          zeros = WebRtcSpl_NormU32((WebRtc_UWord32)magnU16[i]);
+          frac = (WebRtc_Word16)((((WebRtc_UWord32)magnU16[i] << zeros) &
+                                  0x7FFFFFFF) >> 23);
+          // log2(magnU16(i)) in Q8
+          assert(frac < 256);
+          log2 = (WebRtc_Word16)(((31 - zeros) << 8)
+                                 + WebRtcNsx_kLogTableFrac[frac]);
+        }
+        sum_log_magn += (WebRtc_Word32)log2; // Q8
+        // sum_log_i_log_magn in Q17
+        sum_log_i_log_magn += (WEBRTC_SPL_MUL_16_16(kLogIndex[i], log2) >> 3);
+      }
+    }
+
+    //
+    //compute simplified noise model during startup
+    //
+
+    // Estimate White noise
+
+    // Switch whiteNoiseLevel to Q(minNorm-stages)
+    inst->whiteNoiseLevel = WEBRTC_SPL_RSHIFT_U32(inst->whiteNoiseLevel,
+                                                  right_shifts_in_initMagnEst);
+
+    // Update the average magnitude spectrum, used as noise estimate.
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(inst->sumMagn, inst->overdrive);
+    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, inst->stages + 8);
+
+    // Replacing division above with 'stages' shifts
+    // Shift to same Q-domain as whiteNoiseLevel
+    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, right_shifts_in_magnU16);
+    // This operation is safe from wrap around as long as END_STARTUP_SHORT < 128
+    assert(END_STARTUP_SHORT < 128);
+    inst->whiteNoiseLevel += tmpU32no1; // Q(minNorm-stages)
+
+    // Estimate Pink noise parameters
+    // NOTE(review): the code below appears to be a fixed-point least-squares
+    // fit of log2(magnitude) against log2(bin index) over bins >= kStartBand;
+    // the sums-of-log-index terms are precomputed in tables.
+    // Denominator used in both parameter estimates.
+    // The value is only dependent on the size of the frequency band (kStartBand)
+    // and to reduce computational complexity stored in a table (kDeterminantEstMatrix[])
+    assert(kStartBand < 66);
+    matrix_determinant = kDeterminantEstMatrix[kStartBand]; // Q0
+    sum_log_i = kSumLogIndex[kStartBand]; // Q5
+    sum_log_i_square = kSumSquareLogIndex[kStartBand]; // Q2
+    if (inst->fs == 8000) {
+      // Adjust values to shorter blocks in narrow band: subtract the table
+      // contribution of the bins above index 65 that do not exist at 8 kHz.
+      tmp_1_w32 = (WebRtc_Word32)matrix_determinant;
+      tmp_1_w32 += WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], sum_log_i, 9);
+      tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT(kSumLogIndex[65], kSumLogIndex[65], 10);
+      tmp_1_w32 -= WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)sum_log_i_square, 4);
+      tmp_1_w32 -= WEBRTC_SPL_MUL_16_16_RSFT((WebRtc_Word16)
+                       (inst->magnLen - kStartBand), kSumSquareLogIndex[65], 2);
+      matrix_determinant = (WebRtc_Word16)tmp_1_w32;
+      sum_log_i -= kSumLogIndex[65]; // Q5
+      sum_log_i_square -= kSumSquareLogIndex[65]; // Q2
+    }
+
+    // Necessary number of shifts to fit sum_log_magn in a word16
+    zeros = 16 - WebRtcSpl_NormW32(sum_log_magn);
+    if (zeros < 0) {
+      zeros = 0;
+    }
+    tmp_1_w32 = WEBRTC_SPL_LSHIFT_W32(sum_log_magn, 1); // Q9
+    sum_log_magn_u16 = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_W32(tmp_1_w32, zeros);//Q(9-zeros)
+
+    // Calculate and update pinkNoiseNumerator. Result in Q11.
+    tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i_square, sum_log_magn_u16); // Q(11-zeros)
+    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32((WebRtc_UWord32)sum_log_i_log_magn, 12); // Q5
+
+    // Shift the largest value of sum_log_i and tmp32no3 before multiplication
+    tmp_u16 = WEBRTC_SPL_LSHIFT_U16((WebRtc_UWord16)sum_log_i, 1); // Q6
+    if ((WebRtc_UWord32)sum_log_i > tmpU32no1) {
+      tmp_u16 = WEBRTC_SPL_RSHIFT_U16(tmp_u16, zeros);
+    } else {
+      tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, zeros);
+    }
+    tmp_2_w32 -= (WebRtc_Word32)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros)
+    matrix_determinant = WEBRTC_SPL_RSHIFT_W16(matrix_determinant, zeros); // Q(-zeros)
+    tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11
+    // Compensate for the frequency-domain normalization of this frame.
+    tmp_2_w32 += WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)net_norm, 11); // Q11
+    if (tmp_2_w32 < 0) {
+      tmp_2_w32 = 0;
+    }
+    inst->pinkNoiseNumerator += tmp_2_w32; // Q11
+
+    // Calculate and update pinkNoiseExp. Result in Q14.
+    tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros)
+    tmp_1_w32 = WEBRTC_SPL_RSHIFT_W32(sum_log_i_log_magn, 3 + zeros);
+    tmp_1_w32 = WEBRTC_SPL_MUL((WebRtc_Word32)(inst->magnLen - kStartBand),
+                               tmp_1_w32);
+    tmp_2_w32 -= tmp_1_w32; // Q(14-zeros)
+    if (tmp_2_w32 > 0) {
+      // If the exponential parameter is negative force it to zero, which means a
+      // flat spectrum.
+      tmp_1_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q14
+      inst->pinkNoiseExp += WEBRTC_SPL_SAT(16384, tmp_1_w32, 0); // Q14
+    }
+  }
+}
+
+// Inverse-transform the filtered spectrum and write one 10 ms block of
+// denoised samples to outFrame (lower band only).
+//
+// For an all-zero input frame (inst->zeroInputSignal, set by
+// WebRtcNsx_DataAnalysis) the IFFT is skipped and the synthesis buffer is
+// simply drained and shifted. Otherwise the spectrum is filtered
+// (WebRtcNsx_PrepareSpectrum), IFFT'd, denormalized, and — after the startup
+// period when gainMap is enabled — scaled by an energy-ratio gain factor
+// before the overlap-add in WebRtcNsx_SynthesisUpdate.
+void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) {
+  WebRtc_Word32 energyOut;
+
+  WebRtc_Word16 realImag[ANAL_BLOCKL_MAX << 1];
+  WebRtc_Word16 tmp16no1, tmp16no2;
+  WebRtc_Word16 energyRatio;
+  WebRtc_Word16 gainFactor, gainFactor1, gainFactor2;
+
+  int i;
+  int outCIFFT;
+  int scaleEnergyOut = 0;
+
+  if (inst->zeroInputSignal) {
+    // synthesize the special case of zero input
+    // read out fully processed segment
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      outFrame[i] = inst->synthesisBuffer[i]; // Q0
+    }
+    // update synthesis buffer: shift out the consumed 10 ms and zero the tail
+    WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer,
+                          inst->synthesisBuffer + inst->blockLen10ms,
+                          inst->anaLen - inst->blockLen10ms);
+    WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms,
+                            inst->blockLen10ms);
+    return;
+  }
+
+  // Filter the data in the frequency domain, and create spectrum.
+  WebRtcNsx_PrepareSpectrum(inst, realImag);
+
+  // bit-reverse position of elements in array and IFFT it
+  WebRtcSpl_ComplexBitReverse(realImag, inst->stages);
+  outCIFFT = WebRtcSpl_ComplexIFFT(realImag, inst->stages, 1);
+
+  // Denormalize (undo the analysis-time normalization by normData).
+  WebRtcNsx_Denormalize(inst, realImag, outCIFFT);
+
+  //scale factor: only do it after END_STARTUP_LONG time
+  gainFactor = 8192; // 8192 = Q13(1.0)
+  if (inst->gainMap == 1 &&
+      inst->blockIndex > END_STARTUP_LONG &&
+      inst->energyIn > 0) {
+    energyOut = WebRtcSpl_Energy(inst->real, (int)inst->anaLen, &scaleEnergyOut); // Q(-scaleEnergyOut)
+    // Bring energyOut and energyIn to a common Q-domain. The bit mask checks
+    // that energyOut has 8 bits of headroom so the left shift cannot overflow;
+    // otherwise energyIn is shifted down instead.
+    if (scaleEnergyOut == 0 && !(energyOut & 0x7f800000)) {
+      energyOut = WEBRTC_SPL_SHIFT_W32(energyOut, 8 + scaleEnergyOut
+                                       - inst->scaleEnergyIn);
+    } else {
+      inst->energyIn = WEBRTC_SPL_RSHIFT_W32(inst->energyIn, 8 + scaleEnergyOut
+                                             - inst->scaleEnergyIn); // Q(-8-scaleEnergyOut)
+    }
+
+    assert(inst->energyIn > 0);
+    // Rounded ratio of output to input energy.
+    energyRatio = (WebRtc_Word16)WEBRTC_SPL_DIV(energyOut
+        + WEBRTC_SPL_RSHIFT_W32(inst->energyIn, 1), inst->energyIn); // Q8
+    // Limit the ratio to [0, 1] in Q8, i.e., [0, 256]
+    energyRatio = WEBRTC_SPL_SAT(256, energyRatio, 0);
+
+    // all done in lookup tables now
+    assert(energyRatio < 257);
+    gainFactor1 = kFactor1Table[energyRatio]; // Q8
+    gainFactor2 = inst->factor2Table[energyRatio]; // Q8
+
+    //combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent
+
+    // factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code
+    tmp16no1 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(16384 - inst->priorNonSpeechProb,
+                                                        gainFactor1, 14); // Q13 16384 = Q14(1.0)
+    tmp16no2 = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(inst->priorNonSpeechProb,
+                                                        gainFactor2, 14); // Q13;
+    gainFactor = tmp16no1 + tmp16no2; // Q13
+  } // out of flag_gain_map==1
+
+  // Synthesis, read out fully processed segment, and update synthesis buffer.
+  WebRtcNsx_SynthesisUpdate(inst, outFrame, gainFactor);
+}
+
+int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFrameHB,
+                          short* outFrame, short* outFrameHB) {
+  // main routine for noise suppression
+
+  WebRtc_UWord32 tmpU32no1, tmpU32no2, tmpU32no3;
+  WebRtc_UWord32 satMax, maxNoiseU32;
+  WebRtc_UWord32 tmpMagnU32, tmpNoiseU32;
+  WebRtc_UWord32 nearMagnEst;
+  WebRtc_UWord32 noiseUpdateU32;
+  WebRtc_UWord32 noiseU32[HALF_ANAL_BLOCKL];
+  WebRtc_UWord32 postLocSnr[HALF_ANAL_BLOCKL];
+  WebRtc_UWord32 priorLocSnr[HALF_ANAL_BLOCKL];
+  WebRtc_UWord32 prevNearSnr[HALF_ANAL_BLOCKL];
+  WebRtc_UWord32 curNearSnr;
+  WebRtc_UWord32 priorSnr;
+  WebRtc_UWord32 noise_estimate = 0;
+  WebRtc_UWord32 noise_estimate_avg = 0;
+  WebRtc_UWord32 numerator = 0;
+
+  WebRtc_Word32 tmp32no1, tmp32no2;
+  WebRtc_Word32 pink_noise_num_avg = 0;
+
+  WebRtc_UWord16 tmpU16no1;
+  WebRtc_UWord16 magnU16[HALF_ANAL_BLOCKL];
+  WebRtc_UWord16 prevNoiseU16[HALF_ANAL_BLOCKL];
+  WebRtc_UWord16 nonSpeechProbFinal[HALF_ANAL_BLOCKL];
+  WebRtc_UWord16 gammaNoise, prevGammaNoise;
+  WebRtc_UWord16 noiseSupFilterTmp[HALF_ANAL_BLOCKL];
+
+  WebRtc_Word16 qMagn, qNoise;
+  WebRtc_Word16 avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB;
+  WebRtc_Word16 pink_noise_exp_avg = 0;
+
+  int i;
+  int nShifts, postShifts;
+  int norm32no1, norm32no2;
+  int flag, sign;
+  int q_domain_to_use = 0;
+
+  // Code for ARMv7-Neon platform assumes the following:
+  assert(inst->anaLen % 16 == 0);
+  assert(inst->anaLen2 % 8 == 0);
+  assert(inst->blockLen10ms % 16 == 0);
+  assert(inst->magnLen == inst->anaLen2 + 1);
+
+#ifdef NS_FILEDEBUG
+  fwrite(spframe, sizeof(short), inst->blockLen10ms, inst->infile);
+#endif
+
+  // Check that initialization has been done
+  if (inst->initFlag != 1) {
+    return -1;
+  }
+  // Check for valid pointers based on sampling rate
+  if ((inst->fs == 32000) && (speechFrameHB == NULL)) {
+    return -1;
+  }
+
+  // Store speechFrame and transform to frequency domain
+  WebRtcNsx_DataAnalysis(inst, speechFrame, magnU16);
+
+  if (inst->zeroInputSignal) {
+    WebRtcNsx_DataSynthesis(inst, outFrame);
+
+    if (inst->fs == 32000) {
+      // update analysis buffer for H band
+      // append new data to buffer FX
+      WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX, inst->dataBufHBFX + inst->blockLen10ms,
+                            inst->anaLen - inst->blockLen10ms);
+      WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX + inst->anaLen - inst->blockLen10ms,
+                            speechFrameHB, inst->blockLen10ms);
+      for (i = 0; i < inst->blockLen10ms; i++) {
+        outFrameHB[i] = inst->dataBufHBFX[i]; // Q0
+      }
+    } // end of H band gain computation
+    return 0;
+  }
+
+  // Update block index when we have something to process
+  inst->blockIndex++;
+  //
+
+  // Norm of magn
+  qMagn = inst->normData - inst->stages;
+
+  // Compute spectral flatness on input spectrum
+  WebRtcNsx_ComputeSpectralFlatness(inst, magnU16);
+
+  // quantile noise estimate
+  WebRtcNsx_NoiseEstimation(inst, magnU16, noiseU32, &qNoise);
+
+  //noise estimate from previous frame
+  for (i = 0; i < inst->magnLen; i++) {
+    prevNoiseU16[i] = (WebRtc_UWord16)WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], 11); // Q(prevQNoise)
+  }
+
+  if (inst->blockIndex < END_STARTUP_SHORT) {
+    // Noise Q-domain to be used later; see description at end of section.
+    q_domain_to_use = WEBRTC_SPL_MIN((int)qNoise, inst->minNorm - inst->stages);
+
+    // Calculate frequency independent parts in parametric noise estimate and calculate
+    // the estimate for the lower frequency band (same values for all frequency bins)
+    if (inst->pinkNoiseExp) {
+      pink_noise_exp_avg = (WebRtc_Word16)WebRtcSpl_DivW32W16(inst->pinkNoiseExp,
+                                                              (WebRtc_Word16)(inst->blockIndex + 1)); // Q14
+      pink_noise_num_avg = WebRtcSpl_DivW32W16(inst->pinkNoiseNumerator,
+                                               (WebRtc_Word16)(inst->blockIndex + 1)); // Q11
+      WebRtcNsx_CalcParametricNoiseEstimate(inst,
+                                            pink_noise_exp_avg,
+                                            pink_noise_num_avg,
+                                            kStartBand,
+                                            &noise_estimate,
+                                            &noise_estimate_avg);
+    } else {
+      // Use white noise estimate if we have poor pink noise parameter estimates
+      noise_estimate = inst->whiteNoiseLevel; // Q(minNorm-stages)
+      noise_estimate_avg = noise_estimate / (inst->blockIndex + 1); // Q(minNorm-stages)
+    }
+    for (i = 0; i < inst->magnLen; i++) {
+      // Estimate the background noise using the pink noise parameters if permitted
+      if ((inst->pinkNoiseExp) && (i >= kStartBand)) {
+        // Reset noise_estimate
+        noise_estimate = 0;
+        noise_estimate_avg = 0;
+        // Calculate the parametric noise estimate for current frequency bin
+        WebRtcNsx_CalcParametricNoiseEstimate(inst,
+                                              pink_noise_exp_avg,
+                                              pink_noise_num_avg,
+                                              i,
+                                              &noise_estimate,
+                                              &noise_estimate_avg);
+      }
+      // Calculate parametric Wiener filter
+      noiseSupFilterTmp[i] = inst->denoiseBound;
+      if (inst->initMagnEst[i]) {
+        // numerator = (initMagnEst - noise_estimate * overdrive)
+        // Result in Q(8+minNorm-stages)
+        tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive);
+        numerator = WEBRTC_SPL_LSHIFT_U32(inst->initMagnEst[i], 8);
+        if (numerator > tmpU32no1) {
+          // Suppression filter coefficient larger than zero, so calculate.
+          numerator -= tmpU32no1;
+
+          // Determine number of left shifts in numerator for best accuracy after
+          // division
+          nShifts = WebRtcSpl_NormU32(numerator);
+          nShifts = WEBRTC_SPL_SAT(6, nShifts, 0);
+
+          // Shift numerator to Q(nShifts+8+minNorm-stages)
+          numerator = WEBRTC_SPL_LSHIFT_U32(numerator, nShifts);
+
+          // Shift denominator to Q(nShifts-6+minNorm-stages)
+          tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(inst->initMagnEst[i], 6 - nShifts);
+          if (tmpU32no1 == 0) {
+            // This is only possible if numerator = 0, in which case
+            // we don't need any division.
+            tmpU32no1 = 1;
+          }
+          tmpU32no2 = WEBRTC_SPL_UDIV(numerator, tmpU32no1); // Q14
+          noiseSupFilterTmp[i] = (WebRtc_UWord16)WEBRTC_SPL_SAT(16384, tmpU32no2,
+              (WebRtc_UWord32)(inst->denoiseBound)); // Q14
+        }
+      }
+      // Weight quantile noise 'noiseU32' with modeled noise 'noise_estimate_avg'
+      // 'noiseU32 is in Q(qNoise) and 'noise_estimate' in Q(minNorm-stages)
+      // To guarantee that we do not get wrap around when shifting to the same domain
+      // we use the lowest one. Furthermore, we need to save 6 bits for the weighting.
+      // 'noise_estimate_avg' can handle this operation by construction, but 'noiseU32'
+      // may not.
+
+      // Shift 'noiseU32' to 'q_domain_to_use'
+      tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], (int)qNoise - q_domain_to_use);
+      // Shift 'noise_estimate_avg' to 'q_domain_to_use'
+      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noise_estimate_avg, inst->minNorm - inst->stages
+                                        - q_domain_to_use);
+      // Make a simple check to see if we have enough room for weighting 'tmpU32no1'
+      // without wrap around
+      nShifts = 0;
+      if (tmpU32no1 & 0xfc000000) {
+        tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 6);
+        tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 6);
+        nShifts = 6;
+      }
+      tmpU32no1 *= inst->blockIndex;
+      tmpU32no2 *= (END_STARTUP_SHORT - inst->blockIndex);
+      // Add them together and divide by startup length
+      noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT);
+      // Shift back if necessary
+      noiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], nShifts);
+    }
+    // Update new Q-domain for 'noiseU32'
+    qNoise = q_domain_to_use;
+  }
+  // compute average signal during END_STARTUP_LONG time:
+  // used to normalize spectral difference measure
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    // substituting division with shift ending up in Q(-2*stages)
+    inst->timeAvgMagnEnergyTmp
+    += WEBRTC_SPL_RSHIFT_U32(inst->magnEnergy,
+                             2 * inst->normData + inst->stages - 1);
+    inst->timeAvgMagnEnergy = WebRtcSpl_DivU32U16(inst->timeAvgMagnEnergyTmp,
+                                                  inst->blockIndex + 1);
+  }
+
+  //start processing at frames == converged+1
+  // STEP 1: compute prior and post SNR based on quantile noise estimates
+
+  // compute direct decision (DD) estimate of prior SNR: needed for new method
+  satMax = (WebRtc_UWord32)1048575;// Largest possible value without getting overflow despite shifting 12 steps
+  postShifts = 6 + qMagn - qNoise;
+  nShifts = 5 - inst->prevQMagn + inst->prevQNoise;
+  for (i = 0; i < inst->magnLen; i++) {
+    // FLOAT:
+    // post SNR
+    // postLocSnr[i] = 0.0;
+    // if (magn[i] > noise[i])
+    // {
+    //   postLocSnr[i] = magn[i] / (noise[i] + 0.0001);
+    // }
+    // // previous post SNR
+    // // previous estimate: based on previous frame with gain filter (smooth is previous filter)
+    //
+    // prevNearSnr[i] = inst->prevMagnU16[i] / (inst->noisePrev[i] + 0.0001) * (inst->smooth[i]);
+    //
+    // // DD estimate is sum of two terms: current estimate and previous estimate
+    // // directed decision update of priorSnr (or we actually store [2*priorSnr+1])
+    //
+    // priorLocSnr[i] = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * (postLocSnr[i] - 1.0);
+
+    // calculate post SNR: output in Q11
+    postLocSnr[i] = 2048; // 1.0 in Q11
+    tmpU32no1 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], 6); // Q(6+qMagn)
+    if (postShifts < 0) {
+      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], -postShifts); // Q(6+qMagn)
+    } else {
+      tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], postShifts); // Q(6+qMagn)
+    }
+    if (tmpU32no1 > tmpU32no2) {
+      // Current magnitude larger than noise
+      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, 11); // Q(17+qMagn)
+      if (tmpU32no2 > 0) {
+        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q11
+        postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+      } else {
+        postLocSnr[i] = satMax;
+      }
+    }
+
+    // calculate prevNearSnr[i] and save for later instead of recalculating it later
+    nearMagnEst = WEBRTC_SPL_UMUL_16_16(inst->prevMagnU16[i], inst->noiseSupFilter[i]); // Q(prevQMagn+14)
+    tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(nearMagnEst, 3); // Q(prevQMagn+17)
+    tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->prevNoiseU32[i], nShifts); // Q(prevQMagn+6)
+
+    if (tmpU32no2 > 0) {
+      tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q11
+      tmpU32no1 = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+    } else {
+      tmpU32no1 = satMax; // Q11
+    }
+    prevNearSnr[i] = tmpU32no1; // Q11
+
+    //directed decision update of priorSnr
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(postLocSnr[i] - 2048, ONE_MINUS_DD_PR_SNR_Q11); // Q22
+    priorSnr = tmpU32no1 + tmpU32no2 + 512; // Q22 (added 512 for rounding)
+    // priorLocSnr = 1 + 2*priorSnr
+    priorLocSnr[i] = 2048 + WEBRTC_SPL_RSHIFT_U32(priorSnr, 10); // Q11
+  } // end of loop over frequencies
+  // done with step 1: DD computation of prior and post SNR
+
+  // STEP 2: compute speech/noise likelihood
+
+  //compute difference of input spectrum with learned/estimated noise spectrum
+  WebRtcNsx_ComputeSpectralDifference(inst, magnU16);
+  //compute histograms for determination of parameters (thresholds and weights for features)
+  //parameters are extracted once every window time (=inst->modelUpdate)
+  //counter update
+  inst->cntThresUpdate++;
+  flag = (int)(inst->cntThresUpdate == inst->modelUpdate);
+  //update histogram
+  WebRtcNsx_FeatureParameterExtraction(inst, flag);
+  //compute model parameters
+  if (flag) {
+    inst->cntThresUpdate = 0; // Reset counter
+    //update every window:
+    // get normalization for spectral difference for next window estimate
+
+    // Shift to Q(-2*stages)
+    inst->curAvgMagnEnergy = WEBRTC_SPL_RSHIFT_U32(inst->curAvgMagnEnergy, STAT_UPDATES);
+
+    tmpU32no1 = (inst->curAvgMagnEnergy + inst->timeAvgMagnEnergy + 1) >> 1; //Q(-2*stages)
+    // Update featureSpecDiff
+    if ((tmpU32no1 != inst->timeAvgMagnEnergy) && (inst->featureSpecDiff) &&
+        (inst->timeAvgMagnEnergy > 0)) {
+      norm32no1 = 0;
+      tmpU32no3 = tmpU32no1;
+      while (0xFFFF0000 & tmpU32no3) {
+        tmpU32no3 >>= 1;
+        norm32no1++;
+      }
+      tmpU32no2 = inst->featureSpecDiff;
+      while (0xFFFF0000 & tmpU32no2) {
+        tmpU32no2 >>= 1;
+        norm32no1++;
+      }
+      tmpU32no3 = WEBRTC_SPL_UMUL(tmpU32no3, tmpU32no2);
+      tmpU32no3 = WEBRTC_SPL_UDIV(tmpU32no3, inst->timeAvgMagnEnergy);
+      if (WebRtcSpl_NormU32(tmpU32no3) < norm32no1) {
+        inst->featureSpecDiff = 0x007FFFFF;
+      } else {
+        inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF,
+            WEBRTC_SPL_LSHIFT_U32(tmpU32no3, norm32no1));
+      }
+    }
+
+    inst->timeAvgMagnEnergy = tmpU32no1; // Q(-2*stages)
+    inst->curAvgMagnEnergy = 0;
+  }
+
+  //compute speech/noise probability
+  WebRtcNsx_SpeechNoiseProb(inst, nonSpeechProbFinal, priorLocSnr, postLocSnr);
+
+  //time-avg parameter for noise update
+  gammaNoise = NOISE_UPDATE_Q8; // Q8
+
+  maxNoiseU32 = 0;
+  postShifts = inst->prevQNoise - qMagn;
+  nShifts = inst->prevQMagn - qMagn;
+  for (i = 0; i < inst->magnLen; i++) {
+    // temporary noise update: use it for speech frames if update value is less than previous
+    // the formula has been rewritten into:
+    // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
+
+    if (postShifts < 0) {
+      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(magnU16[i], -postShifts); // Q(prevQNoise)
+    } else {
+      tmpU32no2 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], postShifts); // Q(prevQNoise)
+    }
+    if (prevNoiseU16[i] > tmpU32no2) {
+      sign = -1;
+      tmpU32no1 = prevNoiseU16[i] - tmpU32no2;
+    } else {
+      sign = 1;
+      tmpU32no1 = tmpU32no2 - prevNoiseU16[i];
+    }
+    noiseUpdateU32 = inst->prevNoiseU32[i]; // Q(prevQNoise+11)
+    tmpU32no3 = 0;
+    if ((tmpU32no1) && (nonSpeechProbFinal[i])) {
+      // This value will be used later, if gammaNoise changes
+      tmpU32no3 = WEBRTC_SPL_UMUL_32_16(tmpU32no1, nonSpeechProbFinal[i]); // Q(prevQNoise+8)
+      if (0x7c000000 & tmpU32no3) {
+        // Shifting required before multiplication
+        tmpU32no2
+          = WEBRTC_SPL_UMUL_32_16(WEBRTC_SPL_RSHIFT_U32(tmpU32no3, 5), gammaNoise); // Q(prevQNoise+11)
+      } else {
+        // We can do shifting after multiplication
+        tmpU32no2
+          = WEBRTC_SPL_RSHIFT_U32(WEBRTC_SPL_UMUL_32_16(tmpU32no3, gammaNoise), 5); // Q(prevQNoise+11)
+      }
+      if (sign > 0) {
+        noiseUpdateU32 += tmpU32no2; // Q(prevQNoise+11)
+      } else {
+        // This operation is safe. We can never get wrap around, since worst
+        // case scenario means magnU16 = 0
+        noiseUpdateU32 -= tmpU32no2; // Q(prevQNoise+11)
+      }
+    }
+
+    //increase gamma (i.e., less noise update) for frame likely to be speech
+    prevGammaNoise = gammaNoise;
+    gammaNoise = NOISE_UPDATE_Q8;
+    //time-constant based on speech/noise state
+    //increase gamma (i.e., less noise update) for frames likely to be speech
+    if (nonSpeechProbFinal[i] < ONE_MINUS_PROB_RANGE_Q8) {
+      gammaNoise = GAMMA_NOISE_TRANS_AND_SPEECH_Q8;
+    }
+
+    if (prevGammaNoise != gammaNoise) {
+      // new noise update
+      // this line is the same as above, only that the result is stored in a different variable and the gammaNoise
+      // has changed
+      //
+      // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
+
+      if (0x7c000000 & tmpU32no3) {
+        // Shifting required before multiplication
+        tmpU32no2
+          = WEBRTC_SPL_UMUL_32_16(WEBRTC_SPL_RSHIFT_U32(tmpU32no3, 5), gammaNoise); // Q(prevQNoise+11)
+      } else {
+        // We can do shifting after multiplication
+        tmpU32no2
+          = WEBRTC_SPL_RSHIFT_U32(WEBRTC_SPL_UMUL_32_16(tmpU32no3, gammaNoise), 5); // Q(prevQNoise+11)
+      }
+      if (sign > 0) {
+        tmpU32no1 = inst->prevNoiseU32[i] + tmpU32no2; // Q(prevQNoise+11)
+      } else {
+        tmpU32no1 = inst->prevNoiseU32[i] - tmpU32no2; // Q(prevQNoise+11)
+      }
+      if (noiseUpdateU32 > tmpU32no1) {
+        noiseUpdateU32 = tmpU32no1; // Q(prevQNoise+11)
+      }
+    }
+    noiseU32[i] = noiseUpdateU32; // Q(prevQNoise+11)
+    if (noiseUpdateU32 > maxNoiseU32) {
+      maxNoiseU32 = noiseUpdateU32;
+    }
+
+    // conservative noise update
+    // // original FLOAT code
+    // if (prob_speech < PROB_RANGE) {
+    // inst->avgMagnPause[i] = inst->avgMagnPause[i] + (1.0 - gamma_pause)*(magn[i] - inst->avgMagnPause[i]);
+    // }
+
+    tmp32no2 = WEBRTC_SPL_SHIFT_W32(inst->avgMagnPause[i], -nShifts);
+    if (nonSpeechProbFinal[i] > ONE_MINUS_PROB_RANGE_Q8) {
+      if (nShifts < 0) {
+        tmp32no1 = (WebRtc_Word32)magnU16[i] - tmp32no2; // Q(qMagn)
+        tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts)
+        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + 128, 8); // Q(qMagn)
+      } else {
+        tmp32no1 = WEBRTC_SPL_LSHIFT_W32((WebRtc_Word32)magnU16[i], nShifts)
+                   - inst->avgMagnPause[i]; // Q(qMagn+nShifts)
+        tmp32no1 = WEBRTC_SPL_MUL_32_16(tmp32no1, ONE_MINUS_GAMMA_PAUSE_Q8); // Q(8+prevQMagn+nShifts)
+        tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1 + (128 << nShifts), 8 + nShifts); // Q(qMagn)
+      }
+      tmp32no2 += tmp32no1; // Q(qMagn)
+    }
+    inst->avgMagnPause[i] = tmp32no2;
+  } // end of frequency loop
+
+  norm32no1 = WebRtcSpl_NormU32(maxNoiseU32);
+  qNoise = inst->prevQNoise + norm32no1 - 5;
+  // done with step 2: noise update
+
+  // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
+  nShifts = inst->prevQNoise + 11 - qMagn;
+  for (i = 0; i < inst->magnLen; i++) {
+    // FLOAT code
+    // // post and prior SNR
+    // curNearSnr = 0.0;
+    // if (magn[i] > noise[i])
+    // {
+    // curNearSnr = magn[i] / (noise[i] + 0.0001) - 1.0;
+    // }
+    // // DD estimate is sum of two terms: current estimate and previous estimate
+    // // directed decision update of snrPrior
+    // snrPrior = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * curNearSnr;
+    // // gain filter
+    // tmpFloat1 = inst->overdrive + snrPrior;
+    // tmpFloat2 = snrPrior / tmpFloat1;
+    // theFilter[i] = tmpFloat2;
+
+    // calculate curNearSnr again, this is necessary because a new noise estimate has been made since then. for the original
+    curNearSnr = 0; // Q11
+    if (nShifts < 0) {
+      // This case is equivalent with magn < noise which implies curNearSnr = 0;
+      tmpMagnU32 = (WebRtc_UWord32)magnU16[i]; // Q(qMagn)
+      tmpNoiseU32 = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], -nShifts); // Q(qMagn)
+    } else if (nShifts > 17) {
+      tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32(magnU16[i], 17); // Q(qMagn+17)
+      tmpNoiseU32 = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], nShifts - 17); // Q(qMagn+17)
+    } else {
+      tmpMagnU32 = WEBRTC_SPL_LSHIFT_U32((WebRtc_UWord32)magnU16[i], nShifts); // Q(qNoise_prev+11)
+      tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11)
+    }
+    if (tmpMagnU32 > tmpNoiseU32) {
+      tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur)
+      norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1));
+      tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(tmpU32no1, norm32no2); // Q(qCur+norm32no2)
+      tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpNoiseU32, 11 - norm32no2); // Q(qCur+norm32no2-11)
+      if (tmpU32no2 > 0) {
+        tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2); // Q11
+      }
+      curNearSnr = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+    }
+
+    //directed decision update of priorSnr
+    // FLOAT
+    // priorSnr = DD_PR_SNR * prevNearSnr + (1.0-DD_PR_SNR) * curNearSnr;
+
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(curNearSnr, ONE_MINUS_DD_PR_SNR_Q11); // Q22
+    priorSnr = tmpU32no1 + tmpU32no2; // Q22
+
+    //gain filter
+    tmpU32no1 = (WebRtc_UWord32)(inst->overdrive)
+                + WEBRTC_SPL_RSHIFT_U32(priorSnr + 8192, 14); // Q8
+    assert(inst->overdrive > 0);
+    tmpU16no1 = (WebRtc_UWord16)WEBRTC_SPL_UDIV(priorSnr + (tmpU32no1 >> 1), tmpU32no1); // Q14
+    inst->noiseSupFilter[i] = WEBRTC_SPL_SAT(16384, tmpU16no1, inst->denoiseBound); // 16384 = Q14(1.0) // Q14
+
+    // Weight in the parametric Wiener filter during startup
+    if (inst->blockIndex < END_STARTUP_SHORT) {
+      // Weight the two suppression filters
+      tmpU32no1 = WEBRTC_SPL_UMUL_16_16(inst->noiseSupFilter[i],
+                                        (WebRtc_UWord16)inst->blockIndex);
+      tmpU32no2 = WEBRTC_SPL_UMUL_16_16(noiseSupFilterTmp[i],
+                                        (WebRtc_UWord16)(END_STARTUP_SHORT
+                                                         - inst->blockIndex));
+      tmpU32no1 += tmpU32no2;
+      inst->noiseSupFilter[i] = (WebRtc_UWord16)WebRtcSpl_DivU32U16(tmpU32no1,
+                                                                    END_STARTUP_SHORT);
+    }
+  } // end of loop over frequencies
+  //done with step3
+
+  // save noise and magnitude spectrum for next frame
+  inst->prevQNoise = qNoise;
+  inst->prevQMagn = qMagn;
+  if (norm32no1 > 5) {
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->prevNoiseU32[i] = WEBRTC_SPL_LSHIFT_U32(noiseU32[i], norm32no1 - 5); // Q(qNoise+11)
+      inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
+    }
+  } else {
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->prevNoiseU32[i] = WEBRTC_SPL_RSHIFT_U32(noiseU32[i], 5 - norm32no1); // Q(qNoise+11)
+      inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
+    }
+  }
+
+  WebRtcNsx_DataSynthesis(inst, outFrame);
+#ifdef NS_FILEDEBUG
+  fwrite(outframe, sizeof(short), inst->blockLen10ms, inst->outfile);
+#endif
+
+  //for H band:
+  // only update data buffer, then apply time-domain gain is applied derived from L band
+  if (inst->fs == 32000) {
+    // update analysis buffer for H band
+    // append new data to buffer FX
+    WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX, inst->dataBufHBFX + inst->blockLen10ms, inst->anaLen - inst->blockLen10ms);
+    WEBRTC_SPL_MEMCPY_W16(inst->dataBufHBFX + inst->anaLen - inst->blockLen10ms, speechFrameHB, inst->blockLen10ms);
+    // range for averaging low band quantities for H band gain
+
+    gainTimeDomainHB = 16384; // 16384 = Q14(1.0)
+    //average speech prob from low band
+    //average filter gain from low band
+    //avg over second half (i.e., 4->8kHz) of freq. spectrum
+    tmpU32no1 = 0; // Q12
+    tmpU16no1 = 0; // Q8
+    for (i = inst->anaLen2 - (inst->anaLen2 >> 2); i < inst->anaLen2; i++) {
+      tmpU16no1 += nonSpeechProbFinal[i]; // Q8
+      tmpU32no1 += (WebRtc_UWord32)(inst->noiseSupFilter[i]); // Q14
+    }
+    avgProbSpeechHB = (WebRtc_Word16)(4096
+        - WEBRTC_SPL_RSHIFT_U16(tmpU16no1, inst->stages - 7)); // Q12
+    avgFilterGainHB = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_U32(
+        tmpU32no1, inst->stages - 3); // Q14
+
+    // // original FLOAT code
+    // // gain based on speech probability:
+    // avg_prob_speech_tt=(float)2.0*avg_prob_speech-(float)1.0;
+    // gain_mod=(float)0.5*((float)1.0+(float)tanh(avg_prob_speech_tt)); // between 0 and 1
+
+    // gain based on speech probability:
+    // original expression: "0.5 * (1 + tanh(2x-1))"
+    // avgProbSpeechHB has been anyway saturated to a value between 0 and 1 so the other cases don't have to be dealt with
+    // avgProbSpeechHB and gainModHB are in Q12, 3607 = Q12(0.880615234375) which is a zero point of
+    // |0.5 * (1 + tanh(2x-1)) - x| - |0.5 * (1 + tanh(2x-1)) - 0.880615234375| meaning that from that point the error of approximating
+    // the expression with f(x) = x would be greater than the error of approximating the expression with f(x) = 0.880615234375
+    // error: "|0.5 * (1 + tanh(2x-1)) - x| from x=0 to 0.880615234375" -> http://www.wolframalpha.com/input/?i=|0.5+*+(1+%2B+tanh(2x-1))+-+x|+from+x%3D0+to+0.880615234375
+    // and:  "|0.5 * (1 + tanh(2x-1)) - 0.880615234375| from x=0.880615234375 to 1" -> http://www.wolframalpha.com/input/?i=+|0.5+*+(1+%2B+tanh(2x-1))+-+0.880615234375|+from+x%3D0.880615234375+to+1
+    gainModHB = WEBRTC_SPL_MIN(avgProbSpeechHB, 3607);
+
+    // // original FLOAT code
+    // //combine gain with low band gain
+    // if (avg_prob_speech < (float)0.5) {
+    // gain_time_domain_HB=(float)0.5*gain_mod+(float)0.5*avg_filter_gain;
+    // }
+    // else {
+    // gain_time_domain_HB=(float)0.25*gain_mod+(float)0.75*avg_filter_gain;
+    // }
+
+
+    //combine gain with low band gain
+    if (avgProbSpeechHB < 2048) {
+      // 2048 = Q12(0.5)
+      // the next two lines in float are  "gain_time_domain = 0.5 * gain_mod + 0.5 * avg_filter_gain"; Q2(0.5) = 2 equals one left shift
+      gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14
+    } else {
+      // "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;"
+      gainTimeDomainHB = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(3, avgFilterGainHB, 2); // 3 = Q2(0.75); Q14
+      gainTimeDomainHB += gainModHB; // Q14
+    }
+    //make sure gain is within flooring range
+    gainTimeDomainHB
+      = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (WebRtc_Word16)(inst->denoiseBound)); // 16384 = Q14(1.0)
+
+
+    //apply gain
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      outFrameHB[i]
+        = (WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(gainTimeDomainHB, inst->dataBufHBFX[i], 14); // Q0
+    }
+  } // end of H band gain computation
+
+  return 0;
+}
+
+
diff --git a/src/modules/audio_processing/ns/nsx_core.h b/src/modules/audio_processing/ns/nsx_core.h
new file mode 100644
index 0000000..0a0faf9
--- /dev/null
+++ b/src/modules/audio_processing/ns/nsx_core.h
@@ -0,0 +1,222 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
+
+#include "typedefs.h"
+#include "signal_processing_library.h"
+
+#include "nsx_defines.h"
+
+#ifdef NS_FILEDEBUG
+#include <stdio.h>
+#endif
+
+typedef struct NsxInst_t_ { // State of one fixed-point noise-suppression instance.
+  WebRtc_UWord32          fs; // Sampling frequency in Hz; 32000 enables the high-band path.
+
+  const WebRtc_Word16*    window; // Analysis window table (borrowed pointer, not owned).
+  WebRtc_Word16           analysisBuffer[ANAL_BLOCKL_MAX];  // Analysis buffer for the lower band (see AnalysisUpdate).
+  WebRtc_Word16           synthesisBuffer[ANAL_BLOCKL_MAX]; // Output synthesis buffer (see SynthesisUpdate).
+  WebRtc_UWord16          noiseSupFilter[HALF_ANAL_BLOCKL]; // Per-bin suppression gain, Q14 (16384 = 1.0).
+  WebRtc_UWord16          overdrive; /* Q8 */ // Over-drive term added to the prior SNR in the gain filter.
+  WebRtc_UWord16          denoiseBound; /* Q14 */ // Lower bound (floor) for the suppression gain.
+  const WebRtc_Word16*    factor2Table;
+  WebRtc_Word16           noiseEstLogQuantile[SIMULT* HALF_ANAL_BLOCKL]; // Log-domain quantile noise estimates.
+  WebRtc_Word16           noiseEstDensity[SIMULT* HALF_ANAL_BLOCKL];     // Density estimates for the quantile tracking.
+  WebRtc_Word16           noiseEstCounter[SIMULT]; // Update counter per simultaneous estimate (< 201).
+  WebRtc_Word16           noiseEstQuantile[HALF_ANAL_BLOCKL]; // Linear-domain quantile noise estimate, Q(qNoise).
+
+  WebRtc_Word16           anaLen;  // Analysis block length in samples.
+  int                     anaLen2; // Presumably anaLen / 2 -- confirm against nsx_core.c.
+  int                     magnLen; // Number of bins in the magnitude-spectrum arrays.
+  int                     aggrMode; // Aggressiveness mode (see WebRtcNsx_set_policy_core).
+  int                     stages;  // Scaling exponent; energies are tracked in Q(-2*stages).
+  int                     initFlag;
+  int                     gainMap;
+
+  WebRtc_Word32           maxLrt; // Upper bound for the log-LRT feature.
+  WebRtc_Word32           minLrt; // Lower bound for the log-LRT feature.
+  WebRtc_Word32           logLrtTimeAvgW32[HALF_ANAL_BLOCKL]; // Log LRT factor with time-smoothing, Q8.
+  WebRtc_Word32           featureLogLrt;   // Log-LRT feature value.
+  WebRtc_Word32           thresholdLogLrt; // Threshold for the log-LRT feature.
+  WebRtc_Word16           weightLogLrt;    // Weight of the log-LRT feature in the prior model.
+
+  WebRtc_UWord32          featureSpecDiff;   // Spectral-difference feature (capped at 0x007FFFFF).
+  WebRtc_UWord32          thresholdSpecDiff; // Threshold for the spectral-difference feature.
+  WebRtc_Word16           weightSpecDiff;    // Weight of the spectral-difference feature.
+
+  WebRtc_UWord32          featureSpecFlat;   // Spectral-flatness feature.
+  WebRtc_UWord32          thresholdSpecFlat; // Threshold for the spectral-flatness feature.
+  WebRtc_Word16           weightSpecFlat;    // Weight of the spectral-flatness feature.
+
+  WebRtc_Word32           avgMagnPause[HALF_ANAL_BLOCKL]; // Conservative noise-spectrum estimate, updated during speech pauses.
+  WebRtc_UWord32          magnEnergy;
+  WebRtc_UWord32          sumMagn;
+  WebRtc_UWord32          curAvgMagnEnergy;  // Magnitude energy accumulated over the current window, Q(-2*stages).
+  WebRtc_UWord32          timeAvgMagnEnergy; // Time-averaged magnitude energy, Q(-2*stages).
+  WebRtc_UWord32          timeAvgMagnEnergyTmp;
+
+  WebRtc_UWord32          whiteNoiseLevel;              // Initial (white) noise estimate.
+  WebRtc_UWord32          initMagnEst[HALF_ANAL_BLOCKL];// Initial magnitude spectrum estimate.
+  WebRtc_Word32           pinkNoiseNumerator;           // Pink noise parameter: numerator.
+  WebRtc_Word32           pinkNoiseExp;                 // Pink noise parameter: power of frequency.
+  int                     minNorm;                      // Smallest normalization factor.
+  int                     zeroInputSignal;              // Zero input signal flag.
+
+  WebRtc_UWord32          prevNoiseU32[HALF_ANAL_BLOCKL]; // Noise spectrum from previous frame, Q(prevQNoise+11).
+  WebRtc_UWord16          prevMagnU16[HALF_ANAL_BLOCKL]; // Magnitude spectrum from previous frame, Q(prevQMagn).
+  WebRtc_Word16           priorNonSpeechProb; // Prior speech/noise probability, Q14.
+
+  int                     blockIndex; // Frame index counter; compared against END_STARTUP_* limits.
+  int                     modelUpdate; // Update period (in frames) for prior-model thresholds/weights.
+  int                     cntThresUpdate; // Frames since the last model update; reset when it reaches modelUpdate.
+
+  // Histograms for estimating the prior-model parameters (thresholds and weights).
+  WebRtc_Word16           histLrt[HIST_PAR_EST];
+  WebRtc_Word16           histSpecFlat[HIST_PAR_EST];
+  WebRtc_Word16           histSpecDiff[HIST_PAR_EST];
+
+  // Quantities for the high-band (upper half of spectrum, 4-8 kHz at fs=32000) gain estimate.
+  WebRtc_Word16           dataBufHBFX[ANAL_BLOCKL_MAX]; /* Q0 */ // Analysis buffer for the high band.
+
+  int                     qNoise;     // Q-domain of the current noise estimate.
+  int                     prevQNoise; // Q-domain of the previous frame's noise estimate.
+  int                     prevQMagn;  // Q-domain of the previous frame's magnitude spectrum.
+  int                     blockLen10ms; // Number of samples in a 10 ms block.
+
+  WebRtc_Word16           real[ANAL_BLOCKL_MAX]; // Real part of the FFT data.
+  WebRtc_Word16           imag[ANAL_BLOCKL_MAX]; // Imaginary part of the FFT data.
+  WebRtc_Word32           energyIn;
+  int                     scaleEnergyIn;
+  int                     normData; // Input normalization shift; the log offset uses (stages - normData).
+
+} NsxInst_t;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/****************************************************************************
+ * WebRtcNsx_InitCore(...)
+ *
+ * This function initializes a noise suppression instance
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - fs            : Sampling frequency
+ *
+ * Output:
+ *      - inst          : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+WebRtc_Word32 WebRtcNsx_InitCore(NsxInst_t* inst, WebRtc_UWord32 fs);
+
+/****************************************************************************
+ * WebRtcNsx_set_policy_core(...)
+ *
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - inst       : Instance that should be initialized
+ *      - mode       : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
+ *
+ * Output:
+ *      - inst       : Initialized instance
+ *
+ * Return value      :  0 - Ok
+ *                     -1 - Error
+ */
+int WebRtcNsx_set_policy_core(NsxInst_t* inst, int mode);
+
+/****************************************************************************
+ * WebRtcNsx_ProcessCore
+ *
+ * Do noise suppression.
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - inFrameLow    : Input speech frame for lower band
+ *      - inFrameHigh   : Input speech frame for higher band
+ *
+ * Output:
+ *      - inst          : Updated instance
+ *      - outFrameLow   : Output speech frame for lower band
+ *      - outFrameHigh  : Output speech frame for higher band
+ *
+ * Return value         :  0 - OK
+ *                        -1 - Error
+ */
+int WebRtcNsx_ProcessCore(NsxInst_t* inst,
+                          short* inFrameLow,
+                          short* inFrameHigh,
+                          short* outFrameLow,
+                          short* outFrameHigh);
+
+/****************************************************************************
+ * Function pointers for internal routines shared by the ARM NEON and
+ * generic C implementations (selected via WebRtcNsx_InitNeon below).
+ */
+// Noise estimation: estimates the noise spectrum |noise| (Q-domain returned in |q_noise|) from the magnitude spectrum |magn|.
+typedef void (*NoiseEstimation)(NsxInst_t* inst,
+                                uint16_t* magn,
+                                uint32_t* noise,
+                                int16_t* q_noise);
+extern NoiseEstimation WebRtcNsx_NoiseEstimation;
+
+// Filter the data in the frequency domain, and create the spectrum.
+typedef void (*PrepareSpectrum)(NsxInst_t* inst,
+                                int16_t* freq_buff);
+extern PrepareSpectrum WebRtcNsx_PrepareSpectrum;
+
+// For the noise suppression process: synthesis, read out the fully processed
+// segment, and update the synthesis buffer.
+typedef void (*SynthesisUpdate)(NsxInst_t* inst,
+                                int16_t* out_frame,
+                                int16_t gain_factor);
+extern SynthesisUpdate WebRtcNsx_SynthesisUpdate;
+
+// Update the analysis buffer for the lower band, and window the data before the FFT.
+typedef void (*AnalysisUpdate)(NsxInst_t* inst,
+                               int16_t* out,
+                               int16_t* new_speech);
+extern AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+
+// Denormalize the input buffer by the given factor.
+typedef void (*Denormalize)(NsxInst_t* inst,
+                            int16_t* in,
+                            int factor);
+extern Denormalize WebRtcNsx_Denormalize;
+
+// Create a complex number buffer, as the input interleaved with zeros,
+// and normalize it.
+typedef void (*CreateComplexBuffer)(NsxInst_t* inst,
+                                    int16_t* in,
+                                    int16_t* out);
+extern CreateComplexBuffer WebRtcNsx_CreateComplexBuffer;
+
+/****************************************************************************
+ * Initialization of the above function pointers for ARM NEON.
+ */
+void WebRtcNsx_InitNeon(void);
+
+extern const WebRtc_Word16 WebRtcNsx_kLogTable[9];       // log(2^n) offsets, Q8.
+extern const WebRtc_Word16 WebRtcNsx_kLogTableFrac[256]; // Fractional part of log2, Q8.
+extern const WebRtc_Word16 WebRtcNsx_kCounterDiv[201];   // 1/(counter+1), Q15.
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
diff --git a/src/modules/audio_processing/ns/nsx_core_neon.c b/src/modules/audio_processing/ns/nsx_core_neon.c
new file mode 100644
index 0000000..2f85abd
--- /dev/null
+++ b/src/modules/audio_processing/ns/nsx_core_neon.c
@@ -0,0 +1,734 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "nsx_core.h"
+
+#include <arm_neon.h>
+#include <assert.h>
+
+// Exponentiate the log-quantile noise estimates at |offset| into linear-domain estimates in inst->noiseEstQuantile (Q(qNoise)), and derive inst->qNoise; NEON path, 4 bins per iteration plus one scalar tail.
+static void UpdateNoiseEstimateNeon(NsxInst_t* inst, int offset) {
+
+  const int16_t kExp2Const = 11819; // Q13; ~log2(e), so exp(x) = 2^(x * kExp2Const >> 13).
+  int16_t* ptr_noiseEstLogQuantile = NULL;
+  int16_t* ptr_noiseEstQuantile = NULL;
+  int16x4_t kExp2Const16x4 = vdup_n_s16(kExp2Const);
+  int32x4_t twentyOne32x4 = vdupq_n_s32(21);
+  int32x4_t constA32x4 = vdupq_n_s32(0x1fffff); // Mask for the 21 fractional bits.
+  int32x4_t constB32x4 = vdupq_n_s32(0x200000); // Implicit leading one: 2^21.
+  // The largest log quantile over the spectrum bounds the exponentiated values.
+  int16_t tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
+                                        inst->magnLen);
+
+  // Guarantee a Q-domain as high as possible and still fit in int16.
+  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2Const,
+                                                                 tmp16,
+                                                                 21);
+
+  int32x4_t qNoise32x4 = vdupq_n_s32(inst->qNoise);
+
+  // Vector loop, 4 bins per iteration; leaves exactly one tail element when magnLen % 4 == 1 -- presumably the only case used; confirm for other sizes.
+  for (ptr_noiseEstLogQuantile = &inst->noiseEstLogQuantile[offset],
+       ptr_noiseEstQuantile = &inst->noiseEstQuantile[0];
+       ptr_noiseEstQuantile < &inst->noiseEstQuantile[inst->magnLen - 3];
+       ptr_noiseEstQuantile += 4, ptr_noiseEstLogQuantile += 4) {
+
+    // tmp32no2 = WEBRTC_SPL_MUL_16_16(kExp2Const,
+    //                                inst->noiseEstLogQuantile[offset + i]);
+    int16x4_t v16x4 = vld1_s16(ptr_noiseEstLogQuantile);
+    int32x4_t v32x4B = vmull_s16(v16x4, kExp2Const16x4);
+
+    // tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+    int32x4_t v32x4A = vandq_s32(v32x4B, constA32x4);
+    v32x4A = vorrq_s32(v32x4A, constB32x4);
+
+    // tmp16 = (int16_t) WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21);
+    v32x4B = vshrq_n_s32(v32x4B, 21);
+
+    // tmp16 -= 21;// shift 21 to get result in Q0
+    v32x4B = vsubq_s32(v32x4B, twentyOne32x4);
+
+    // tmp16 += (int16_t) inst->qNoise;
+    // shift to get result in Q(qNoise)
+    v32x4B = vaddq_s32(v32x4B, qNoise32x4);
+
+    // if (tmp16 < 0) {
+    //   tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1, -tmp16);
+    // } else {
+    //   tmp32no1 = WEBRTC_SPL_LSHIFT_W32(tmp32no1, tmp16);
+    // }
+    v32x4B = vshlq_s32(v32x4A, v32x4B); // vshlq_s32 shifts right for negative shift counts.
+
+    // tmp16 = WebRtcSpl_SatW32ToW16(tmp32no1);
+    v16x4 = vqmovn_s32(v32x4B); // Saturating narrow to int16.
+
+    //inst->noiseEstQuantile[i] = tmp16;
+    vst1_s16(ptr_noiseEstQuantile, v16x4);
+  }
+
+  // Scalar tail: the last remaining bin, same computation as the vector loop.
+
+  // inst->quantile[i]=exp(inst->lquantile[offset+i]);
+  // in Q21
+  int32_t tmp32no2 = WEBRTC_SPL_MUL_16_16(kExp2Const,
+                                          *ptr_noiseEstLogQuantile);
+  int32_t tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+
+  tmp16 = (int16_t) WEBRTC_SPL_RSHIFT_W32(tmp32no2, 21);
+  tmp16 -= 21;// shift 21 to get result in Q0
+  tmp16 += (int16_t) inst->qNoise; //shift to get result in Q(qNoise)
+  if (tmp16 < 0) {
+    tmp32no1 = WEBRTC_SPL_RSHIFT_W32(tmp32no1, -tmp16);
+  } else {
+    tmp32no1 = WEBRTC_SPL_LSHIFT_W32(tmp32no1, tmp16);
+  }
+  *ptr_noiseEstQuantile = WebRtcSpl_SatW32ToW16(tmp32no1);
+}
+
+// Noise Estimation
+static void NoiseEstimationNeon(NsxInst_t* inst,
+                                uint16_t* magn,
+                                uint32_t* noise,
+                                int16_t* q_noise) {
+  int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
+  int16_t countProd, delta, zeros, frac;
+  int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
+  const int16_t log2_const = 22713;
+  const int16_t width_factor = 21845;
+
+  int i, s, offset;
+
+  tabind = inst->stages - inst->normData;
+  assert(tabind < 9);
+  assert(tabind > -9);
+  if (tabind < 0) {
+    logval = -WebRtcNsx_kLogTable[-tabind];
+  } else {
+    logval = WebRtcNsx_kLogTable[tabind];
+  }
+
+  int16x8_t logval_16x8 = vdupq_n_s16(logval);
+
+  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
+  // magn is in Q(-stages), and the real lmagn values are:
+  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
+  // lmagn in Q8
+  for (i = 0; i < inst->magnLen; i++) {
+    if (magn[i]) {
+      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
+      frac = (int16_t)((((uint32_t)magn[i] << zeros)
+                        & 0x7FFFFFFF) >> 23);
+      assert(frac < 256);
+      // log2(magn(i))
+      log2 = (int16_t)(((31 - zeros) << 8)
+                       + WebRtcNsx_kLogTableFrac[frac]);
+      // log2(magn(i))*log(2)
+      lmagn[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(log2, log2_const, 15);
+      // + log(2^stages)
+      lmagn[i] += logval;
+    } else {
+      lmagn[i] = logval;
+    }
+  }
+
+  int16x4_t Q3_16x4  = vdup_n_s16(3);
+  int16x8_t WIDTHQ8_16x8 = vdupq_n_s16(WIDTH_Q8);
+  int16x8_t WIDTHFACTOR_16x8 = vdupq_n_s16(width_factor);
+
+  int16_t factor = FACTOR_Q7;
+  if (inst->blockIndex < END_STARTUP_LONG)
+    factor = FACTOR_Q7_STARTUP;
+
+  // Loop over simultaneous estimates
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * inst->magnLen;
+
+    // Get counter values from state
+    counter = inst->noiseEstCounter[s];
+    assert(counter < 201);
+    countDiv = WebRtcNsx_kCounterDiv[counter];
+    countProd = (int16_t)WEBRTC_SPL_MUL_16_16(counter, countDiv);
+
+    // quant_est(...)
+    int16_t deltaBuff[8];
+    int16x4_t tmp16x4_0;
+    int16x4_t tmp16x4_1;
+    int16x4_t countDiv_16x4 = vdup_n_s16(countDiv);
+    int16x8_t countProd_16x8 = vdupq_n_s16(countProd);
+    int16x8_t tmp16x8_0 = vdupq_n_s16(countDiv);
+    int16x8_t prod16x8 = vqrdmulhq_s16(WIDTHFACTOR_16x8, tmp16x8_0);
+    int16x8_t tmp16x8_1;
+    int16x8_t tmp16x8_2;
+    int16x8_t tmp16x8_3;
+    int16x8_t tmp16x8_4;
+    int16x8_t tmp16x8_5;
+    int32x4_t tmp32x4;
+
+    for (i = 0; i < inst->magnLen - 7; i += 8) {
+      // Compute delta.
+      // Smaller step size during startup. This prevents from using
+      // unrealistic values causing overflow.
+      tmp16x8_0 = vdupq_n_s16(factor);
+      vst1q_s16(deltaBuff, tmp16x8_0);
+
+      int j;
+      for (j = 0; j < 8; j++) {
+        if (inst->noiseEstDensity[offset + i + j] > 512) {
+          // Get values for deltaBuff by shifting intead of dividing.
+          int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i + j]);
+          deltaBuff[j] = (int16_t)(FACTOR_Q16 >> (14 - factor));
+        }
+      }
+
+      // Update log quantile estimate
+
+      // tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[0]), countDiv_16x4);
+      tmp16x4_1 = vshrn_n_s32(tmp32x4, 14);
+      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[4]), countDiv_16x4);
+      tmp16x4_0 = vshrn_n_s32(tmp32x4, 14);
+      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0); // Keep for several lines.
+
+      // prepare for the "if" branch
+      // tmp16 += 2;
+      // tmp16_1 = (Word16)(tmp16>>2);
+      tmp16x8_1 = vrshrq_n_s16(tmp16x8_0, 2);
+
+      // inst->noiseEstLogQuantile[offset+i] + tmp16_1;
+      tmp16x8_2 = vld1q_s16(&inst->noiseEstLogQuantile[offset + i]); // Keep
+      tmp16x8_1 = vaddq_s16(tmp16x8_2, tmp16x8_1); // Keep for several lines
+
+      // Prepare for the "else" branch
+      // tmp16 += 1;
+      // tmp16_1 = (Word16)(tmp16>>1);
+      tmp16x8_0 = vrshrq_n_s16(tmp16x8_0, 1);
+
+      // tmp16_2 = (Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16_1,3,1);
+      tmp32x4 = vmull_s16(vget_low_s16(tmp16x8_0), Q3_16x4);
+      tmp16x4_1 = vshrn_n_s32(tmp32x4, 1);
+
+      // tmp16_2 = (Word16)WEBRTC_SPL_MUL_16_16_RSFT(tmp16_1,3,1);
+      tmp32x4 = vmull_s16(vget_high_s16(tmp16x8_0), Q3_16x4);
+      tmp16x4_0 = vshrn_n_s32(tmp32x4, 1);
+
+      // inst->noiseEstLogQuantile[offset + i] - tmp16_2;
+      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0); // keep
+      tmp16x8_0 = vsubq_s16(tmp16x8_2, tmp16x8_0);
+
+      // logval is the smallest fixed point representation we can have. Values
+      // below that will correspond to values in the interval [0, 1], which
+      // can't possibly occur.
+      tmp16x8_0 = vmaxq_s16(tmp16x8_0, logval_16x8);
+
+      // Do the if-else branches:
+      tmp16x8_3 = vld1q_s16(&lmagn[i]); // keep for several lines
+      tmp16x8_5 = vsubq_s16(tmp16x8_3, tmp16x8_2);
+      __asm__("vcgt.s16 %q0, %q1, #0"::"w"(tmp16x8_4), "w"(tmp16x8_5));
+      __asm__("vbit %q0, %q1, %q2"::
+              "w"(tmp16x8_2), "w"(tmp16x8_1), "w"(tmp16x8_4));
+      __asm__("vbif %q0, %q1, %q2"::
+              "w"(tmp16x8_2), "w"(tmp16x8_0), "w"(tmp16x8_4));
+      vst1q_s16(&inst->noiseEstLogQuantile[offset + i], tmp16x8_2);
+
+      // Update density estimate
+      // tmp16_1 + tmp16_2
+      tmp16x8_1 = vld1q_s16(&inst->noiseEstDensity[offset + i]);
+      tmp16x8_0 = vqrdmulhq_s16(tmp16x8_1, countProd_16x8);
+      tmp16x8_0 = vaddq_s16(tmp16x8_0, prod16x8);
+
+      // lmagn[i] - inst->noiseEstLogQuantile[offset + i]
+      tmp16x8_3 = vsubq_s16(tmp16x8_3, tmp16x8_2);
+      tmp16x8_3 = vabsq_s16(tmp16x8_3);
+      tmp16x8_4 = vcgtq_s16(WIDTHQ8_16x8, tmp16x8_3);
+      __asm__("vbit %q0, %q1, %q2"::
+              "w"(tmp16x8_1), "w"(tmp16x8_0), "w"(tmp16x8_4));
+      vst1q_s16(&inst->noiseEstDensity[offset + i], tmp16x8_1);
+    } // End loop over magnitude spectrum
+
+    // Last iteration over magnitude spectrum:
+    // compute delta
+    if (inst->noiseEstDensity[offset + i] > 512) {
+      // Get values for deltaBuff by shifting instead of dividing.
+      int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
+      delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
+    } else {
+      delta = FACTOR_Q7;
+      if (inst->blockIndex < END_STARTUP_LONG) {
+        // Smaller step size during startup. This prevents from using
+        // unrealistic values causing overflow.
+        delta = FACTOR_Q7_STARTUP;
+      }
+    }
+    // update log quantile estimate
+    tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(delta, countDiv, 14);
+    if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
+      // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
+      // CounterDiv=1/(inst->counter[s]+1) in Q15
+      tmp16 += 2;
+      tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2);
+      inst->noiseEstLogQuantile[offset + i] += tmp16no1;
+    } else {
+      tmp16 += 1;
+      tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1);
+      // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
+      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1);
+      inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
+      if (inst->noiseEstLogQuantile[offset + i] < logval) {
+        // logval is the smallest fixed point representation we can have.
+        // Values below that will correspond to values in the interval
+        // [0, 1], which can't possibly occur.
+        inst->noiseEstLogQuantile[offset + i] = logval;
+      }
+    }
+
+    // update density estimate
+    if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
+        < WIDTH_Q8) {
+      tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   inst->noiseEstDensity[offset + i], countProd, 15);
+      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   width_factor, countDiv, 15);
+      inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
+    }
+
+
+    if (counter >= END_STARTUP_LONG) {
+      inst->noiseEstCounter[s] = 0;
+      if (inst->blockIndex >= END_STARTUP_LONG) {
+        UpdateNoiseEstimateNeon(inst, offset);
+      }
+    }
+    inst->noiseEstCounter[s]++;
+
+  } // end loop over simultaneous estimates
+
+  // Sequentially update the noise during startup
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    UpdateNoiseEstimateNeon(inst, offset);
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    noise[i] = (uint32_t)(inst->noiseEstQuantile[i]); // Q(qNoise)
+  }
+  (*q_noise) = (int16_t)inst->qNoise;
+}
+
+// Filter the data in the frequency domain, and create spectrum.
+//
+// Phase (1) multiplies real/imag by noiseSupFilter (Q14) with a >>14,
+// vectorized 8 elements per loop iteration; the final (odd) element pair is
+// handled in scalar code. Phase (2) builds the conjugate-symmetric spectrum
+// freq_buf[] from real[]/imag[], writing the forward half interleaved as
+// (re, -im) and the mirrored half as (re, im).
+// NOTE(review): the loop bounds assume magnLen - 1 and anaLen2 - 1 are
+// multiples of the vector widths used — presumably guaranteed by the
+// supported frame sizes; confirm against NsxInst_t initialization.
+static void PrepareSpectrumNeon(NsxInst_t* inst, int16_t* freq_buf) {
+
+  // (1) Filtering.
+
+  // Fixed point C code for the next block is as follows:
+  // for (i = 0; i < inst->magnLen; i++) {
+  //   inst->real[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->real[i],
+  //      (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+  //   inst->imag[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(inst->imag[i],
+  //      (int16_t)(inst->noiseSupFilter[i]), 14); // Q(normData-stages)
+  // }
+
+  int16_t* ptr_real = &inst->real[0];
+  int16_t* ptr_imag = &inst->imag[0];
+  uint16_t* ptr_noiseSupFilter = &inst->noiseSupFilter[0];
+
+  // Filter the rest in the frequency domain.
+  for (; ptr_real < &inst->real[inst->magnLen - 1];) {
+    // Loop unrolled once. Both pointers are incremented by 4 twice.
+    __asm__ __volatile__(
+      "vld1.16 d20, [%[ptr_real]]\n\t"
+      "vld1.16 d22, [%[ptr_imag]]\n\t"
+      "vld1.16 d23, [%[ptr_noiseSupFilter]]!\n\t"
+      "vmull.s16 q10, d20, d23\n\t"
+      "vmull.s16 q11, d22, d23\n\t"
+      "vshrn.s32 d20, q10, #14\n\t"
+      "vshrn.s32 d22, q11, #14\n\t"
+      "vst1.16 d20, [%[ptr_real]]!\n\t"
+      "vst1.16 d22, [%[ptr_imag]]!\n\t"
+
+      "vld1.16 d18, [%[ptr_real]]\n\t"
+      "vld1.16 d24, [%[ptr_imag]]\n\t"
+      "vld1.16 d25, [%[ptr_noiseSupFilter]]!\n\t"
+      "vmull.s16 q9, d18, d25\n\t"
+      "vmull.s16 q12, d24, d25\n\t"
+      "vshrn.s32 d18, q9, #14\n\t"
+      "vshrn.s32 d24, q12, #14\n\t"
+      "vst1.16 d18, [%[ptr_real]]!\n\t"
+      "vst1.16 d24, [%[ptr_imag]]!\n\t"
+
+      // Specify constraints.
+      :[ptr_imag]"+r"(ptr_imag),
+       [ptr_real]"+r"(ptr_real),
+       [ptr_noiseSupFilter]"+r"(ptr_noiseSupFilter)
+      :
+      :"d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25",
+       "q9", "q10", "q11", "q12"
+    );
+  }
+
+  // Filter the last pair of elements in the frequency domain.
+  *ptr_real = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(*ptr_real,
+      (int16_t)(*ptr_noiseSupFilter), 14); // Q(normData-stages)
+  *ptr_imag = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(*ptr_imag,
+      (int16_t)(*ptr_noiseSupFilter), 14); // Q(normData-stages)
+
+  // (2) Create spectrum.
+
+  // Fixed point C code for the rest of the function is as follows:
+  // freq_buf[0] = inst->real[0];
+  // freq_buf[1] = -inst->imag[0];
+  // for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+  //   tmp16 = (inst->anaLen << 1) - j;
+  //   freq_buf[j] = inst->real[i];
+  //   freq_buf[j + 1] = -inst->imag[i];
+  //   freq_buf[tmp16] = inst->real[i];
+  //   freq_buf[tmp16 + 1] = inst->imag[i];
+  // }
+  // freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  // freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+
+  freq_buf[0] = inst->real[0];
+  freq_buf[1] = -inst->imag[0];
+
+  int offset = -16;
+  int16_t* ptr_realImag1 = &freq_buf[2];
+  // Fixed: was "ptr_realImag2 = ptr_realImag2 = &freq_buf[...]", an accidental
+  // double assignment that read the still-uninitialized variable.
+  int16_t* ptr_realImag2 = &freq_buf[(inst->anaLen << 1) - 8];
+  ptr_real = &inst->real[1];
+  ptr_imag = &inst->imag[1];
+  for (; ptr_real < &inst->real[inst->anaLen2 - 11];) {
+    // Loop unrolled once. All pointers are incremented twice.
+    __asm__ __volatile__(
+      "vld1.16 d22, [%[ptr_real]]!\n\t"
+      "vld1.16 d23, [%[ptr_imag]]!\n\t"
+      // Negate and interleave:
+      "vmov.s16 d20, d22\n\t"
+      "vneg.s16 d21, d23\n\t"
+      "vzip.16 d20, d21\n\t"
+      // Write 8 elements to &freq_buf[j]
+      "vst1.16 {d20, d21}, [%[ptr_realImag1]]!\n\t"
+      // Interleave and reverse elements:
+      "vzip.16 d22, d23\n\t"
+      "vrev64.32 d18, d23\n\t"
+      "vrev64.32 d19, d22\n\t"
+      // Write 8 elements to &freq_buf[tmp16]
+      "vst1.16 {d18, d19}, [%[ptr_realImag2]], %[offset]\n\t"
+
+      "vld1.16 d22, [%[ptr_real]]!\n\t"
+      "vld1.16 d23, [%[ptr_imag]]!\n\t"
+      // Negate and interleave:
+      "vmov.s16 d20, d22\n\t"
+      "vneg.s16 d21, d23\n\t"
+      "vzip.16 d20, d21\n\t"
+      // Write 8 elements to &freq_buf[j]
+      "vst1.16 {d20, d21}, [%[ptr_realImag1]]!\n\t"
+      // Interleave and reverse elements:
+      "vzip.16 d22, d23\n\t"
+      "vrev64.32 d18, d23\n\t"
+      "vrev64.32 d19, d22\n\t"
+      // Write 8 elements to &freq_buf[tmp16]
+      "vst1.16 {d18, d19}, [%[ptr_realImag2]], %[offset]\n\t"
+
+      // Specify constraints.
+      :[ptr_imag]"+r"(ptr_imag),
+       [ptr_real]"+r"(ptr_real),
+       [ptr_realImag1]"+r"(ptr_realImag1),
+       [ptr_realImag2]"+r"(ptr_realImag2)
+      :[offset]"r"(offset)
+      :"d18", "d19", "d20", "d21", "d22", "d23"
+    );
+  }
+  // Scalar tail: finish the remaining mirrored pairs up to anaLen2.
+  for (ptr_realImag2 += 6;
+       ptr_real <= &inst->real[inst->anaLen2];
+       ptr_real += 1, ptr_imag += 1, ptr_realImag1 += 2, ptr_realImag2 -= 2) {
+    *ptr_realImag1 = *ptr_real;
+    *(ptr_realImag1 + 1) = -(*ptr_imag);
+    *ptr_realImag2 = *ptr_real;
+    *(ptr_realImag2 + 1) = *ptr_imag;
+  }
+
+  freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+}
+
+// Denormalize the input buffer: for each element, shift the 32-bit widened
+// value by (factor - inst->normData) bits, saturate back to 16 bits, and
+// store into inst->real[]. The input is interleaved complex data; vld2
+// de-interleaves so only the even-indexed (real) samples are consumed.
+// Processes 8 output elements per loop iteration.
+// NOTE(review): assumes inst->anaLen is a multiple of 8 — presumably
+// guaranteed by the supported frame sizes; confirm.
+static __inline void DenormalizeNeon(NsxInst_t* inst, int16_t* in, int factor) {
+  int16_t* ptr_real = &inst->real[0];
+  int16_t* ptr_in = &in[0];
+
+  // Broadcast the shift amount. vshl.s32 shifts right for negative counts,
+  // matching WEBRTC_SPL_SHIFT_W32's sign convention.
+  __asm__ __volatile__("vdup.32 q10, %0" ::
+                       "r"((int32_t)(factor - inst->normData)) : "q10");
+  for (; ptr_real < &inst->real[inst->anaLen];) {
+
+    // Loop unrolled once. Both pointers are incremented.
+    __asm__ __volatile__(
+      // tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[j],
+      //                             factor - inst->normData);
+      "vld2.16 {d24, d25}, [%[ptr_in]]!\n\t"
+      "vmovl.s16 q12, d24\n\t"
+      "vshl.s32 q12, q10\n\t"
+      // inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+      "vqmovn.s32 d24, q12\n\t"
+      "vst1.16 d24, [%[ptr_real]]!\n\t"
+
+      // tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[j],
+      //                             factor - inst->normData);
+      "vld2.16 {d22, d23}, [%[ptr_in]]!\n\t"
+      "vmovl.s16 q11, d22\n\t"
+      "vshl.s32 q11, q10\n\t"
+      // inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+      "vqmovn.s32 d22, q11\n\t"
+      "vst1.16 d22, [%[ptr_real]]!\n\t"
+
+      // Specify constraints.
+      :[ptr_in]"+r"(ptr_in),
+       [ptr_real]"+r"(ptr_real)
+      :
+      :"d22", "d23", "d24", "d25"
+    );
+  }
+}
+
+// For the noise suppression process: (1) window inst->real[] and accumulate
+// the gain-scaled result into the synthesis (overlap-add) buffer with
+// saturation, (2) read out the fully processed 10 ms segment into out_frame,
+// (3) shift the synthesis buffer left by blockLen10ms and zero-fill the tail.
+// gain_factor is in Q13; window is in Q14; buffers are Q0.
+// NOTE(review): the vectorized copy/zero loops move 16 elements per
+// iteration — assumes anaLen and blockLen10ms are multiples of 16; confirm
+// against the supported frame sizes.
+static void SynthesisUpdateNeon(NsxInst_t* inst,
+                                int16_t* out_frame,
+                                int16_t gain_factor) {
+  int16_t* ptr_real = &inst->real[0];
+  int16_t* ptr_syn = &inst->synthesisBuffer[0];
+  int16_t* ptr_window = &inst->window[0];
+
+  // synthesis
+  // Broadcast gain_factor into d24 once; the loop body below relies on d24
+  // surviving between asm statements (fragile but conventional here — d24 is
+  // never clobbered by compiler-visible code in between).
+  __asm__ __volatile__("vdup.16 d24, %0" : : "r"(gain_factor) : "d24");
+  // Loop unrolled once. All pointers are incremented in the assembly code.
+  for (; ptr_syn < &inst->synthesisBuffer[inst->anaLen];) {
+    __asm__ __volatile__(
+      // Load variables.
+      "vld1.16 d22, [%[ptr_real]]!\n\t"
+      "vld1.16 d23, [%[ptr_window]]!\n\t"
+      "vld1.16 d25, [%[ptr_syn]]\n\t"
+      // tmp16a = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+      //           inst->window[i], inst->real[i], 14); // Q0, window in Q14
+      "vmull.s16 q11, d22, d23\n\t"
+      "vrshrn.i32 d22, q11, #14\n\t"
+      // tmp32 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16a, gain_factor, 13);
+      "vmull.s16 q11, d24, d22\n\t"
+      // tmp16b = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+      "vqrshrn.s32 d22, q11, #13\n\t"
+      // inst->synthesisBuffer[i] = WEBRTC_SPL_ADD_SAT_W16(
+      //     inst->synthesisBuffer[i], tmp16b); // Q0
+      "vqadd.s16 d25, d22\n\t"
+      "vst1.16 d25, [%[ptr_syn]]!\n\t"
+
+      // Load variables.
+      "vld1.16 d26, [%[ptr_real]]!\n\t"
+      "vld1.16 d27, [%[ptr_window]]!\n\t"
+      "vld1.16 d28, [%[ptr_syn]]\n\t"
+      // tmp16a = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+      //           inst->window[i], inst->real[i], 14); // Q0, window in Q14
+      "vmull.s16 q13, d26, d27\n\t"
+      "vrshrn.i32 d26, q13, #14\n\t"
+      // tmp32 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16a, gain_factor, 13);
+      "vmull.s16 q13, d24, d26\n\t"
+      // tmp16b = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+      "vqrshrn.s32 d26, q13, #13\n\t"
+      // inst->synthesisBuffer[i] = WEBRTC_SPL_ADD_SAT_W16(
+      //     inst->synthesisBuffer[i], tmp16b); // Q0
+      "vqadd.s16 d28, d26\n\t"
+      "vst1.16 d28, [%[ptr_syn]]!\n\t"
+
+      // Specify constraints.
+      :[ptr_real]"+r"(ptr_real),
+       [ptr_window]"+r"(ptr_window),
+       [ptr_syn]"+r"(ptr_syn)
+      :
+      :"d22", "d23", "d24", "d25", "d26", "d27", "d28", "q11", "q12", "q13"
+    );
+  }
+
+  int16_t* ptr_out = &out_frame[0];
+  ptr_syn = &inst->synthesisBuffer[0];
+  // read out fully processed segment
+  for (; ptr_syn < &inst->synthesisBuffer[inst->blockLen10ms];) {
+    // Loop unrolled once. Both pointers are incremented in the assembly code.
+    __asm__ __volatile__(
+      // out_frame[i] = inst->synthesisBuffer[i]; // Q0
+      "vld1.16 {d22, d23}, [%[ptr_syn]]!\n\t"
+      "vld1.16 {d24, d25}, [%[ptr_syn]]!\n\t"
+      "vst1.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      "vst1.16 {d24, d25}, [%[ptr_out]]!\n\t"
+      :[ptr_syn]"+r"(ptr_syn),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d22", "d23", "d24", "d25"
+    );
+  }
+
+  // Update synthesis buffer.
+  // C code:
+  // WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer,
+  //                      inst->synthesisBuffer + inst->blockLen10ms,
+  //                      inst->anaLen - inst->blockLen10ms);
+  // NOTE: the trailing comma below is the comma operator — equivalent to ';'.
+  ptr_out = &inst->synthesisBuffer[0],
+  ptr_syn = &inst->synthesisBuffer[inst->blockLen10ms];
+  for (; ptr_syn < &inst->synthesisBuffer[inst->anaLen];) {
+    // Loop unrolled once. Both pointers are incremented in the assembly code.
+    __asm__ __volatile__(
+      "vld1.16 {d22, d23}, [%[ptr_syn]]!\n\t"
+      "vld1.16 {d24, d25}, [%[ptr_syn]]!\n\t"
+      "vst1.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      "vst1.16 {d24, d25}, [%[ptr_out]]!\n\t"
+      :[ptr_syn]"+r"(ptr_syn),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d22", "d23", "d24", "d25"
+    );
+  }
+
+  // C code:
+  // WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
+  //    + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
+  // ptr_out was left pointing at anaLen - blockLen10ms by the copy loop above.
+  __asm__ __volatile__("vdup.16 q10, %0" : : "r"(0) : "q10");
+  for (; ptr_out < &inst->synthesisBuffer[inst->anaLen];) {
+    // Loop unrolled once. Pointer is incremented in the assembly code.
+    __asm__ __volatile__(
+      "vst1.16 {d20, d21}, [%[ptr_out]]!\n\t"
+      "vst1.16 {d20, d21}, [%[ptr_out]]!\n\t"
+      :[ptr_out]"+r"(ptr_out)
+      :
+      :"d20", "d21"
+    );
+  }
+}
+
+// Update analysis buffer for lower band, and window data before FFT:
+// (1) shift the analysis buffer left by blockLen10ms samples, (2) append the
+// new 10 ms of speech at the end, (3) multiply by the Q14 analysis window
+// (with rounding, >>14) into out[]. Interface matches the generic C version.
+// NOTE(review): the copy loops move 16 elements per iteration and the
+// windowing loop 8 per iteration — assumes anaLen and blockLen10ms are
+// multiples of 16; confirm against the supported frame sizes.
+static void AnalysisUpdateNeon(NsxInst_t* inst,
+                               int16_t* out,
+                               int16_t* new_speech) {
+
+  int16_t* ptr_ana = &inst->analysisBuffer[inst->blockLen10ms];
+  int16_t* ptr_out = &inst->analysisBuffer[0];
+
+  // For lower band update analysis buffer.
+  // WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer,
+  //                      inst->analysisBuffer + inst->blockLen10ms,
+  //                      inst->anaLen - inst->blockLen10ms);
+  for (; ptr_out < &inst->analysisBuffer[inst->anaLen - inst->blockLen10ms];) {
+    // Loop unrolled once, so both pointers are incremented by 8 twice.
+    __asm__ __volatile__(
+      "vld1.16 {d20, d21}, [%[ptr_ana]]!\n\t"
+      "vst1.16 {d20, d21}, [%[ptr_out]]!\n\t"
+      "vld1.16 {d22, d23}, [%[ptr_ana]]!\n\t"
+      "vst1.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      :[ptr_ana]"+r"(ptr_ana),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d20", "d21", "d22", "d23"
+    );
+  }
+
+  // WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer
+  //    + inst->anaLen - inst->blockLen10ms, new_speech, inst->blockLen10ms);
+  // ptr_out continues from where the shift loop stopped; source switches to
+  // the caller's new_speech frame.
+  for (ptr_ana = new_speech; ptr_out < &inst->analysisBuffer[inst->anaLen];) {
+    // Loop unrolled once, so both pointers are incremented by 8 twice.
+    __asm__ __volatile__(
+      "vld1.16 {d20, d21}, [%[ptr_ana]]!\n\t"
+      "vst1.16 {d20, d21}, [%[ptr_out]]!\n\t"
+      "vld1.16 {d22, d23}, [%[ptr_ana]]!\n\t"
+      "vst1.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      :[ptr_ana]"+r"(ptr_ana),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d20", "d21", "d22", "d23"
+    );
+  }
+
+  // Window data before FFT
+  int16_t* ptr_window = &inst->window[0];
+  ptr_out = &out[0];
+  ptr_ana = &inst->analysisBuffer[0];
+  for (; ptr_out < &out[inst->anaLen];) {
+
+    // Loop unrolled once, so all pointers are incremented by 4 twice.
+    __asm__ __volatile__(
+      "vld1.16 d20, [%[ptr_ana]]!\n\t"
+      "vld1.16 d21, [%[ptr_window]]!\n\t"
+      // out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+      //           inst->window[i], inst->analysisBuffer[i], 14); // Q0
+      "vmull.s16 q10, d20, d21\n\t"
+      "vrshrn.i32 d20, q10, #14\n\t"
+      "vst1.16 d20, [%[ptr_out]]!\n\t"
+
+      "vld1.16 d22, [%[ptr_ana]]!\n\t"
+      "vld1.16 d23, [%[ptr_window]]!\n\t"
+      // out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+      //           inst->window[i], inst->analysisBuffer[i], 14); // Q0
+      "vmull.s16 q11, d22, d23\n\t"
+      "vrshrn.i32 d22, q11, #14\n\t"
+      "vst1.16 d22, [%[ptr_out]]!\n\t"
+
+      // Specify constraints.
+      :[ptr_ana]"+r"(ptr_ana),
+       [ptr_window]"+r"(ptr_window),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d20", "d21", "d22", "d23", "q10", "q11"
+    );
+  }
+}
+
+// Create a complex number buffer (out[]) as the input (in[]) interleaved with
+// zeros, and normalize it: out holds (in[i] << normData, 0) pairs, i.e. the
+// real parts left-shifted by inst->normData with zeroed imaginary parts.
+// Each loop iteration consumes 16 input samples and writes 32 outputs.
+// NOTE(review): assumes inst->anaLen is a multiple of 16 — presumably
+// guaranteed by the supported frame sizes; confirm.
+static __inline void CreateComplexBufferNeon(NsxInst_t* inst,
+                                             int16_t* in,
+                                             int16_t* out) {
+  int16_t* ptr_out = &out[0];
+  int16_t* ptr_in = &in[0];
+
+  // d25 = all zeros (imaginary parts); q10 = broadcast left-shift count.
+  __asm__ __volatile__("vdup.16 d25, %0" : : "r"(0) : "d25");
+  __asm__ __volatile__("vdup.16 q10, %0" : : "r"(inst->normData) : "q10");
+  for (; ptr_in < &in[inst->anaLen];) {
+
+    // Loop unrolled once, so ptr_in is incremented by 8 twice,
+    // and ptr_out is incremented by 8 four times.
+    __asm__ __volatile__(
+      // out[j] = WEBRTC_SPL_LSHIFT_W16(in[i], inst->normData); // Q(normData)
+      "vld1.16 {d22, d23}, [%[ptr_in]]!\n\t"
+      "vshl.s16 q11, q10\n\t"
+      "vmov d24, d23\n\t"
+
+      // out[j + 1] = 0; // Insert zeros in imaginary part
+      // vst2 interleaves each real lane with a zero lane.
+      "vmov d23, d25\n\t"
+      "vst2.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      "vst2.16 {d24, d25}, [%[ptr_out]]!\n\t"
+
+      // out[j] = WEBRTC_SPL_LSHIFT_W16(in[i], inst->normData); // Q(normData)
+      "vld1.16 {d22, d23}, [%[ptr_in]]!\n\t"
+      "vshl.s16 q11, q10\n\t"
+      "vmov d24, d23\n\t"
+
+      // out[j + 1] = 0; // Insert zeros in imaginary part
+      "vmov d23, d25\n\t"
+      "vst2.16 {d22, d23}, [%[ptr_out]]!\n\t"
+      "vst2.16 {d24, d25}, [%[ptr_out]]!\n\t"
+
+      // Specify constraints.
+      :[ptr_in]"+r"(ptr_in),
+       [ptr_out]"+r"(ptr_out)
+      :
+      :"d22", "d23", "d24", "d25", "q10", "q11"
+    );
+  }
+}
+
+// Install the NEON-optimized implementations into the module's function
+// pointers, replacing the generic C versions. Called once during NS init
+// when NEON support is available.
+void WebRtcNsx_InitNeon(void) {
+  WebRtcNsx_NoiseEstimation = NoiseEstimationNeon;
+  WebRtcNsx_PrepareSpectrum = PrepareSpectrumNeon;
+  WebRtcNsx_SynthesisUpdate = SynthesisUpdateNeon;
+  WebRtcNsx_AnalysisUpdate = AnalysisUpdateNeon;
+  WebRtcNsx_Denormalize = DenormalizeNeon;
+  WebRtcNsx_CreateComplexBuffer = CreateComplexBufferNeon;
+}
diff --git a/src/modules/audio_processing/ns/main/source/nsx_defines.h b/src/modules/audio_processing/ns/nsx_defines.h
similarity index 97%
rename from src/modules/audio_processing/ns/main/source/nsx_defines.h
rename to src/modules/audio_processing/ns/nsx_defines.h
index 58796b9..cd1e3bf 100644
--- a/src/modules/audio_processing/ns/main/source/nsx_defines.h
+++ b/src/modules/audio_processing/ns/nsx_defines.h
@@ -18,6 +18,7 @@
 #define END_STARTUP_SHORT       50
 #define FACTOR_Q16              (WebRtc_Word32)2621440 // 40 in Q16
 #define FACTOR_Q7               (WebRtc_Word16)5120 // 40 in Q7
+#define FACTOR_Q7_STARTUP       (WebRtc_Word16)1024 // 8 in Q7
 #define WIDTH_Q8                3 // 0.01 in Q8 (or 25 )
 //PARAMETERS FOR NEW METHOD
 #define DD_PR_SNR_Q11           2007 // ~= Q11(0.98) DD update of prior SNR
diff --git a/src/modules/audio_processing/ns/windows_private.h b/src/modules/audio_processing/ns/windows_private.h
new file mode 100644
index 0000000..44c2e84
--- /dev/null
+++ b/src/modules/audio_processing/ns/windows_private.h
@@ -0,0 +1,574 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
+
+// Hanning window for 4ms 16kHz
+static const float kHanning64w128[128] = {
+  0.00000000000000f, 0.02454122852291f, 0.04906767432742f,
+  0.07356456359967f, 0.09801714032956f, 0.12241067519922f,
+  0.14673047445536f, 0.17096188876030f, 0.19509032201613f,
+  0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+  0.29028467725446f, 0.31368174039889f, 0.33688985339222f,
+  0.35989503653499f, 0.38268343236509f, 0.40524131400499f,
+  0.42755509343028f, 0.44961132965461f, 0.47139673682600f,
+  0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+  0.55557023301960f, 0.57580819141785f, 0.59569930449243f,
+  0.61523159058063f, 0.63439328416365f, 0.65317284295378f,
+  0.67155895484702f, 0.68954054473707f, 0.70710678118655f,
+  0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+  0.77301045336274f, 0.78834642762661f, 0.80320753148064f,
+  0.81758481315158f, 0.83146961230255f, 0.84485356524971f,
+  0.85772861000027f, 0.87008699110871f, 0.88192126434835f,
+  0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+  0.92387953251129f, 0.93299279883474f, 0.94154406518302f,
+  0.94952818059304f, 0.95694033573221f, 0.96377606579544f,
+  0.97003125319454f, 0.97570213003853f, 0.98078528040323f,
+  0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+  0.99518472667220f, 0.99729045667869f, 0.99879545620517f,
+  0.99969881869620f, 1.00000000000000f,
+  0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+  0.99518472667220f, 0.99247953459871f, 0.98917650996478f,
+  0.98527764238894f, 0.98078528040323f, 0.97570213003853f,
+  0.97003125319454f, 0.96377606579544f, 0.95694033573221f,
+  0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+  0.92387953251129f, 0.91420975570353f, 0.90398929312344f,
+  0.89322430119552f, 0.88192126434835f, 0.87008699110871f,
+  0.85772861000027f, 0.84485356524971f, 0.83146961230255f,
+  0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+  0.77301045336274f, 0.75720884650648f, 0.74095112535496f,
+  0.72424708295147f, 0.70710678118655f, 0.68954054473707f,
+  0.67155895484702f, 0.65317284295378f, 0.63439328416365f,
+  0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+  0.55557023301960f, 0.53499761988710f, 0.51410274419322f,
+  0.49289819222978f, 0.47139673682600f, 0.44961132965461f,
+  0.42755509343028f, 0.40524131400499f, 0.38268343236509f,
+  0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+  0.29028467725446f, 0.26671275747490f, 0.24298017990326f,
+  0.21910124015687f, 0.19509032201613f, 0.17096188876030f,
+  0.14673047445536f, 0.12241067519922f, 0.09801714032956f,
+  0.07356456359967f, 0.04906767432742f, 0.02454122852291f
+};
+
+
+
+// hybrib Hanning & flat window
+static const float kBlocks80w128[128] = {
+  (float)0.00000000, (float)0.03271908, (float)0.06540313, (float)0.09801714, (float)0.13052619,
+  (float)0.16289547, (float)0.19509032, (float)0.22707626, (float)0.25881905, (float)0.29028468,
+  (float)0.32143947, (float)0.35225005, (float)0.38268343, (float)0.41270703, (float)0.44228869,
+  (float)0.47139674, (float)0.50000000, (float)0.52806785, (float)0.55557023, (float)0.58247770,
+  (float)0.60876143, (float)0.63439328, (float)0.65934582, (float)0.68359230, (float)0.70710678,
+  (float)0.72986407, (float)0.75183981, (float)0.77301045, (float)0.79335334, (float)0.81284668,
+  (float)0.83146961, (float)0.84920218, (float)0.86602540, (float)0.88192126, (float)0.89687274,
+  (float)0.91086382, (float)0.92387953, (float)0.93590593, (float)0.94693013, (float)0.95694034,
+  (float)0.96592583, (float)0.97387698, (float)0.98078528, (float)0.98664333, (float)0.99144486,
+  (float)0.99518473, (float)0.99785892, (float)0.99946459, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99946459, (float)0.99785892, (float)0.99518473, (float)0.99144486,
+  (float)0.98664333, (float)0.98078528, (float)0.97387698, (float)0.96592583, (float)0.95694034,
+  (float)0.94693013, (float)0.93590593, (float)0.92387953, (float)0.91086382, (float)0.89687274,
+  (float)0.88192126, (float)0.86602540, (float)0.84920218, (float)0.83146961, (float)0.81284668,
+  (float)0.79335334, (float)0.77301045, (float)0.75183981, (float)0.72986407, (float)0.70710678,
+  (float)0.68359230, (float)0.65934582, (float)0.63439328, (float)0.60876143, (float)0.58247770,
+  (float)0.55557023, (float)0.52806785, (float)0.50000000, (float)0.47139674, (float)0.44228869,
+  (float)0.41270703, (float)0.38268343, (float)0.35225005, (float)0.32143947, (float)0.29028468,
+  (float)0.25881905, (float)0.22707626, (float)0.19509032, (float)0.16289547, (float)0.13052619,
+  (float)0.09801714, (float)0.06540313, (float)0.03271908
+};
+
+// hybrib Hanning & flat window
+static const float kBlocks160w256[256] = {
+  (float)0.00000000, (float)0.01636173, (float)0.03271908, (float)0.04906767, (float)0.06540313,
+  (float)0.08172107, (float)0.09801714, (float)0.11428696, (float)0.13052619, (float)0.14673047,
+  (float)0.16289547, (float)0.17901686, (float)0.19509032, (float)0.21111155, (float)0.22707626,
+  (float)0.24298018, (float)0.25881905, (float)0.27458862, (float)0.29028468, (float)0.30590302,
+  (float)0.32143947, (float)0.33688985, (float)0.35225005, (float)0.36751594, (float)0.38268343,
+  (float)0.39774847, (float)0.41270703, (float)0.42755509, (float)0.44228869, (float)0.45690388,
+  (float)0.47139674, (float)0.48576339, (float)0.50000000, (float)0.51410274, (float)0.52806785,
+  (float)0.54189158, (float)0.55557023, (float)0.56910015, (float)0.58247770, (float)0.59569930,
+  (float)0.60876143, (float)0.62166057, (float)0.63439328, (float)0.64695615, (float)0.65934582,
+  (float)0.67155895, (float)0.68359230, (float)0.69544264, (float)0.70710678, (float)0.71858162,
+  (float)0.72986407, (float)0.74095113, (float)0.75183981, (float)0.76252720, (float)0.77301045,
+  (float)0.78328675, (float)0.79335334, (float)0.80320753, (float)0.81284668, (float)0.82226822,
+  (float)0.83146961, (float)0.84044840, (float)0.84920218, (float)0.85772861, (float)0.86602540,
+  (float)0.87409034, (float)0.88192126, (float)0.88951608, (float)0.89687274, (float)0.90398929,
+  (float)0.91086382, (float)0.91749450, (float)0.92387953, (float)0.93001722, (float)0.93590593,
+  (float)0.94154407, (float)0.94693013, (float)0.95206268, (float)0.95694034, (float)0.96156180,
+  (float)0.96592583, (float)0.97003125, (float)0.97387698, (float)0.97746197, (float)0.98078528,
+  (float)0.98384601, (float)0.98664333, (float)0.98917651, (float)0.99144486, (float)0.99344778,
+  (float)0.99518473, (float)0.99665524, (float)0.99785892, (float)0.99879546, (float)0.99946459,
+  (float)0.99986614, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99986614, (float)0.99946459, (float)0.99879546, (float)0.99785892,
+  (float)0.99665524, (float)0.99518473, (float)0.99344778, (float)0.99144486, (float)0.98917651,
+  (float)0.98664333, (float)0.98384601, (float)0.98078528, (float)0.97746197, (float)0.97387698,
+  (float)0.97003125, (float)0.96592583, (float)0.96156180, (float)0.95694034, (float)0.95206268,
+  (float)0.94693013, (float)0.94154407, (float)0.93590593, (float)0.93001722, (float)0.92387953,
+  (float)0.91749450, (float)0.91086382, (float)0.90398929, (float)0.89687274, (float)0.88951608,
+  (float)0.88192126, (float)0.87409034, (float)0.86602540, (float)0.85772861, (float)0.84920218,
+  (float)0.84044840, (float)0.83146961, (float)0.82226822, (float)0.81284668, (float)0.80320753,
+  (float)0.79335334, (float)0.78328675, (float)0.77301045, (float)0.76252720, (float)0.75183981,
+  (float)0.74095113, (float)0.72986407, (float)0.71858162, (float)0.70710678, (float)0.69544264,
+  (float)0.68359230, (float)0.67155895, (float)0.65934582, (float)0.64695615, (float)0.63439328,
+  (float)0.62166057, (float)0.60876143, (float)0.59569930, (float)0.58247770, (float)0.56910015,
+  (float)0.55557023, (float)0.54189158, (float)0.52806785, (float)0.51410274, (float)0.50000000,
+  (float)0.48576339, (float)0.47139674, (float)0.45690388, (float)0.44228869, (float)0.42755509,
+  (float)0.41270703, (float)0.39774847, (float)0.38268343, (float)0.36751594, (float)0.35225005,
+  (float)0.33688985, (float)0.32143947, (float)0.30590302, (float)0.29028468, (float)0.27458862,
+  (float)0.25881905, (float)0.24298018, (float)0.22707626, (float)0.21111155, (float)0.19509032,
+  (float)0.17901686, (float)0.16289547, (float)0.14673047, (float)0.13052619, (float)0.11428696,
+  (float)0.09801714, (float)0.08172107, (float)0.06540313, (float)0.04906767, (float)0.03271908,
+  (float)0.01636173
+};
+
+// hybrib Hanning & flat window: for 20ms
+static const float kBlocks320w512[512] = {
+  (float)0.00000000, (float)0.00818114, (float)0.01636173, (float)0.02454123, (float)0.03271908,
+  (float)0.04089475, (float)0.04906767, (float)0.05723732, (float)0.06540313, (float)0.07356456,
+  (float)0.08172107, (float)0.08987211, (float)0.09801714, (float)0.10615561, (float)0.11428696,
+  (float)0.12241068, (float)0.13052619, (float)0.13863297, (float)0.14673047, (float)0.15481816,
+  (float)0.16289547, (float)0.17096189, (float)0.17901686, (float)0.18705985, (float)0.19509032,
+  (float)0.20310773, (float)0.21111155, (float)0.21910124, (float)0.22707626, (float)0.23503609,
+  (float)0.24298018, (float)0.25090801, (float)0.25881905, (float)0.26671276, (float)0.27458862,
+  (float)0.28244610, (float)0.29028468, (float)0.29810383, (float)0.30590302, (float)0.31368174,
+  (float)0.32143947, (float)0.32917568, (float)0.33688985, (float)0.34458148, (float)0.35225005,
+  (float)0.35989504, (float)0.36751594, (float)0.37511224, (float)0.38268343, (float)0.39022901,
+  (float)0.39774847, (float)0.40524131, (float)0.41270703, (float)0.42014512, (float)0.42755509,
+  (float)0.43493645, (float)0.44228869, (float)0.44961133, (float)0.45690388, (float)0.46416584,
+  (float)0.47139674, (float)0.47859608, (float)0.48576339, (float)0.49289819, (float)0.50000000,
+  (float)0.50706834, (float)0.51410274, (float)0.52110274, (float)0.52806785, (float)0.53499762,
+  (float)0.54189158, (float)0.54874927, (float)0.55557023, (float)0.56235401, (float)0.56910015,
+  (float)0.57580819, (float)0.58247770, (float)0.58910822, (float)0.59569930, (float)0.60225052,
+  (float)0.60876143, (float)0.61523159, (float)0.62166057, (float)0.62804795, (float)0.63439328,
+  (float)0.64069616, (float)0.64695615, (float)0.65317284, (float)0.65934582, (float)0.66547466,
+  (float)0.67155895, (float)0.67759830, (float)0.68359230, (float)0.68954054, (float)0.69544264,
+  (float)0.70129818, (float)0.70710678, (float)0.71286806, (float)0.71858162, (float)0.72424708,
+  (float)0.72986407, (float)0.73543221, (float)0.74095113, (float)0.74642045, (float)0.75183981,
+  (float)0.75720885, (float)0.76252720, (float)0.76779452, (float)0.77301045, (float)0.77817464,
+  (float)0.78328675, (float)0.78834643, (float)0.79335334, (float)0.79830715, (float)0.80320753,
+  (float)0.80805415, (float)0.81284668, (float)0.81758481, (float)0.82226822, (float)0.82689659,
+  (float)0.83146961, (float)0.83598698, (float)0.84044840, (float)0.84485357, (float)0.84920218,
+  (float)0.85349396, (float)0.85772861, (float)0.86190585, (float)0.86602540, (float)0.87008699,
+  (float)0.87409034, (float)0.87803519, (float)0.88192126, (float)0.88574831, (float)0.88951608,
+  (float)0.89322430, (float)0.89687274, (float)0.90046115, (float)0.90398929, (float)0.90745693,
+  (float)0.91086382, (float)0.91420976, (float)0.91749450, (float)0.92071783, (float)0.92387953,
+  (float)0.92697940, (float)0.93001722, (float)0.93299280, (float)0.93590593, (float)0.93875641,
+  (float)0.94154407, (float)0.94426870, (float)0.94693013, (float)0.94952818, (float)0.95206268,
+  (float)0.95453345, (float)0.95694034, (float)0.95928317, (float)0.96156180, (float)0.96377607,
+  (float)0.96592583, (float)0.96801094, (float)0.97003125, (float)0.97198664, (float)0.97387698,
+  (float)0.97570213, (float)0.97746197, (float)0.97915640, (float)0.98078528, (float)0.98234852,
+  (float)0.98384601, (float)0.98527764, (float)0.98664333, (float)0.98794298, (float)0.98917651,
+  (float)0.99034383, (float)0.99144486, (float)0.99247953, (float)0.99344778, (float)0.99434953,
+  (float)0.99518473, (float)0.99595331, (float)0.99665524, (float)0.99729046, (float)0.99785892,
+  (float)0.99836060, (float)0.99879546, (float)0.99916346, (float)0.99946459, (float)0.99969882,
+  (float)0.99986614, (float)0.99996653, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99996653, (float)0.99986614, (float)0.99969882, (float)0.99946459,
+  (float)0.99916346, (float)0.99879546, (float)0.99836060, (float)0.99785892, (float)0.99729046,
+  (float)0.99665524, (float)0.99595331, (float)0.99518473, (float)0.99434953, (float)0.99344778,
+  (float)0.99247953, (float)0.99144486, (float)0.99034383, (float)0.98917651, (float)0.98794298,
+  (float)0.98664333, (float)0.98527764, (float)0.98384601, (float)0.98234852, (float)0.98078528,
+  (float)0.97915640, (float)0.97746197, (float)0.97570213, (float)0.97387698, (float)0.97198664,
+  (float)0.97003125, (float)0.96801094, (float)0.96592583, (float)0.96377607, (float)0.96156180,
+  (float)0.95928317, (float)0.95694034, (float)0.95453345, (float)0.95206268, (float)0.94952818,
+  (float)0.94693013, (float)0.94426870, (float)0.94154407, (float)0.93875641, (float)0.93590593,
+  (float)0.93299280, (float)0.93001722, (float)0.92697940, (float)0.92387953, (float)0.92071783,
+  (float)0.91749450, (float)0.91420976, (float)0.91086382, (float)0.90745693, (float)0.90398929,
+  (float)0.90046115, (float)0.89687274, (float)0.89322430, (float)0.88951608, (float)0.88574831,
+  (float)0.88192126, (float)0.87803519, (float)0.87409034, (float)0.87008699, (float)0.86602540,
+  (float)0.86190585, (float)0.85772861, (float)0.85349396, (float)0.84920218, (float)0.84485357,
+  (float)0.84044840, (float)0.83598698, (float)0.83146961, (float)0.82689659, (float)0.82226822,
+  (float)0.81758481, (float)0.81284668, (float)0.80805415, (float)0.80320753, (float)0.79830715,
+  (float)0.79335334, (float)0.78834643, (float)0.78328675, (float)0.77817464, (float)0.77301045,
+  (float)0.76779452, (float)0.76252720, (float)0.75720885, (float)0.75183981, (float)0.74642045,
+  (float)0.74095113, (float)0.73543221, (float)0.72986407, (float)0.72424708, (float)0.71858162,
+  (float)0.71286806, (float)0.70710678, (float)0.70129818, (float)0.69544264, (float)0.68954054,
+  (float)0.68359230, (float)0.67759830, (float)0.67155895, (float)0.66547466, (float)0.65934582,
+  (float)0.65317284, (float)0.64695615, (float)0.64069616, (float)0.63439328, (float)0.62804795,
+  (float)0.62166057, (float)0.61523159, (float)0.60876143, (float)0.60225052, (float)0.59569930,
+  (float)0.58910822, (float)0.58247770, (float)0.57580819, (float)0.56910015, (float)0.56235401,
+  (float)0.55557023, (float)0.54874927, (float)0.54189158, (float)0.53499762, (float)0.52806785,
+  (float)0.52110274, (float)0.51410274, (float)0.50706834, (float)0.50000000, (float)0.49289819,
+  (float)0.48576339, (float)0.47859608, (float)0.47139674, (float)0.46416584, (float)0.45690388,
+  (float)0.44961133, (float)0.44228869, (float)0.43493645, (float)0.42755509, (float)0.42014512,
+  (float)0.41270703, (float)0.40524131, (float)0.39774847, (float)0.39022901, (float)0.38268343,
+  (float)0.37511224, (float)0.36751594, (float)0.35989504, (float)0.35225005, (float)0.34458148,
+  (float)0.33688985, (float)0.32917568, (float)0.32143947, (float)0.31368174, (float)0.30590302,
+  (float)0.29810383, (float)0.29028468, (float)0.28244610, (float)0.27458862, (float)0.26671276,
+  (float)0.25881905, (float)0.25090801, (float)0.24298018, (float)0.23503609, (float)0.22707626,
+  (float)0.21910124, (float)0.21111155, (float)0.20310773, (float)0.19509032, (float)0.18705985,
+  (float)0.17901686, (float)0.17096189, (float)0.16289547, (float)0.15481816, (float)0.14673047,
+  (float)0.13863297, (float)0.13052619, (float)0.12241068, (float)0.11428696, (float)0.10615561,
+  (float)0.09801714, (float)0.08987211, (float)0.08172107, (float)0.07356456, (float)0.06540313,
+  (float)0.05723732, (float)0.04906767, (float)0.04089475, (float)0.03271908, (float)0.02454123,
+  (float)0.01636173, (float)0.00818114
+};
+
+
+// Hanning window: for 15ms at 16kHz with symmetric zeros
+static const float kBlocks240w512[512] = {
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00654494, (float)0.01308960, (float)0.01963369,
+  (float)0.02617695, (float)0.03271908, (float)0.03925982, (float)0.04579887, (float)0.05233596,
+  (float)0.05887080, (float)0.06540313, (float)0.07193266, (float)0.07845910, (float)0.08498218,
+  (float)0.09150162, (float)0.09801714, (float)0.10452846, (float)0.11103531, (float)0.11753740,
+  (float)0.12403446, (float)0.13052620, (float)0.13701233, (float)0.14349262, (float)0.14996676,
+  (float)0.15643448, (float)0.16289547, (float)0.16934951, (float)0.17579629, (float)0.18223552,
+  (float)0.18866697, (float)0.19509032, (float)0.20150533, (float)0.20791170, (float)0.21430916,
+  (float)0.22069745, (float)0.22707628, (float)0.23344538, (float)0.23980446, (float)0.24615330,
+  (float)0.25249159, (float)0.25881904, (float)0.26513544, (float)0.27144045, (float)0.27773386,
+  (float)0.28401536, (float)0.29028466, (float)0.29654160, (float)0.30278578, (float)0.30901700,
+  (float)0.31523499, (float)0.32143945, (float)0.32763019, (float)0.33380687, (float)0.33996925,
+  (float)0.34611708, (float)0.35225007, (float)0.35836795, (float)0.36447051, (float)0.37055743,
+  (float)0.37662852, (float)0.38268346, (float)0.38872197, (float)0.39474389, (float)0.40074885,
+  (float)0.40673664, (float)0.41270703, (float)0.41865975, (float)0.42459452, (float)0.43051112,
+  (float)0.43640924, (float)0.44228873, (float)0.44814920, (float)0.45399052, (float)0.45981237,
+  (float)0.46561453, (float)0.47139674, (float)0.47715878, (float)0.48290035, (float)0.48862126,
+  (float)0.49432120, (float)0.50000000, (float)0.50565743, (float)0.51129311, (float)0.51690692,
+  (float)0.52249855, (float)0.52806789, (float)0.53361452, (float)0.53913832, (float)0.54463905,
+  (float)0.55011642, (float)0.55557024, (float)0.56100029, (float)0.56640625, (float)0.57178795,
+  (float)0.57714522, (float)0.58247769, (float)0.58778524, (float)0.59306765, (float)0.59832460,
+  (float)0.60355598, (float)0.60876143, (float)0.61394083, (float)0.61909395, (float)0.62422055,
+  (float)0.62932038, (float)0.63439333, (float)0.63943899, (float)0.64445734, (float)0.64944810,
+  (float)0.65441096, (float)0.65934587, (float)0.66425246, (float)0.66913062, (float)0.67398012,
+  (float)0.67880076, (float)0.68359232, (float)0.68835455, (float)0.69308740, (float)0.69779050,
+  (float)0.70246369, (float)0.70710677, (float)0.71171963, (float)0.71630198, (float)0.72085363,
+  (float)0.72537440, (float)0.72986406, (float)0.73432255, (float)0.73874950, (float)0.74314487,
+  (float)0.74750835, (float)0.75183982, (float)0.75613910, (float)0.76040596, (float)0.76464027,
+  (float)0.76884186, (float)0.77301043, (float)0.77714598, (float)0.78124821, (float)0.78531694,
+  (float)0.78935206, (float)0.79335338, (float)0.79732066, (float)0.80125386, (float)0.80515265,
+  (float)0.80901700, (float)0.81284672, (float)0.81664157, (float)0.82040149, (float)0.82412618,
+  (float)0.82781565, (float)0.83146966, (float)0.83508795, (float)0.83867061, (float)0.84221727,
+  (float)0.84572780, (float)0.84920216, (float)0.85264021, (float)0.85604161, (float)0.85940641,
+  (float)0.86273444, (float)0.86602545, (float)0.86927933, (float)0.87249607, (float)0.87567532,
+  (float)0.87881714, (float)0.88192129, (float)0.88498765, (float)0.88801610, (float)0.89100653,
+  (float)0.89395881, (float)0.89687276, (float)0.89974827, (float)0.90258533, (float)0.90538365,
+  (float)0.90814316, (float)0.91086388, (float)0.91354549, (float)0.91618794, (float)0.91879123,
+  (float)0.92135513, (float)0.92387950, (float)0.92636442, (float)0.92880958, (float)0.93121493,
+  (float)0.93358046, (float)0.93590593, (float)0.93819135, (float)0.94043654, (float)0.94264150,
+  (float)0.94480604, (float)0.94693011, (float)0.94901365, (float)0.95105654, (float)0.95305866,
+  (float)0.95501995, (float)0.95694035, (float)0.95881975, (float)0.96065807, (float)0.96245527,
+  (float)0.96421117, (float)0.96592581, (float)0.96759909, (float)0.96923089, (float)0.97082120,
+  (float)0.97236991, (float)0.97387701, (float)0.97534233, (float)0.97676587, (float)0.97814763,
+  (float)0.97948742, (float)0.98078531, (float)0.98204112, (float)0.98325491, (float)0.98442656,
+  (float)0.98555607, (float)0.98664331, (float)0.98768836, (float)0.98869103, (float)0.98965138,
+  (float)0.99056935, (float)0.99144489, (float)0.99227792, (float)0.99306846, (float)0.99381649,
+  (float)0.99452192, (float)0.99518472, (float)0.99580491, (float)0.99638247, (float)0.99691731,
+  (float)0.99740952, (float)0.99785894, (float)0.99826562, (float)0.99862951, (float)0.99895066,
+  (float)0.99922901, (float)0.99946457, (float)0.99965733, (float)0.99980724, (float)0.99991435,
+  (float)0.99997860, (float)1.00000000, (float)0.99997860, (float)0.99991435, (float)0.99980724,
+  (float)0.99965733, (float)0.99946457, (float)0.99922901, (float)0.99895066, (float)0.99862951,
+  (float)0.99826562, (float)0.99785894, (float)0.99740946, (float)0.99691731, (float)0.99638247,
+  (float)0.99580491, (float)0.99518472, (float)0.99452192, (float)0.99381644, (float)0.99306846,
+  (float)0.99227792, (float)0.99144489, (float)0.99056935, (float)0.98965138, (float)0.98869103,
+  (float)0.98768836, (float)0.98664331, (float)0.98555607, (float)0.98442656, (float)0.98325491,
+  (float)0.98204112, (float)0.98078525, (float)0.97948742, (float)0.97814757, (float)0.97676587,
+  (float)0.97534227, (float)0.97387695, (float)0.97236991, (float)0.97082120, (float)0.96923089,
+  (float)0.96759909, (float)0.96592581, (float)0.96421117, (float)0.96245521, (float)0.96065807,
+  (float)0.95881969, (float)0.95694029, (float)0.95501995, (float)0.95305860, (float)0.95105648,
+  (float)0.94901365, (float)0.94693011, (float)0.94480604, (float)0.94264150, (float)0.94043654,
+  (float)0.93819129, (float)0.93590593, (float)0.93358046, (float)0.93121493, (float)0.92880952,
+  (float)0.92636436, (float)0.92387950, (float)0.92135507, (float)0.91879123, (float)0.91618794,
+  (float)0.91354543, (float)0.91086382, (float)0.90814310, (float)0.90538365, (float)0.90258527,
+  (float)0.89974827, (float)0.89687276, (float)0.89395875, (float)0.89100647, (float)0.88801610,
+  (float)0.88498759, (float)0.88192123, (float)0.87881714, (float)0.87567532, (float)0.87249595,
+  (float)0.86927933, (float)0.86602539, (float)0.86273432, (float)0.85940641, (float)0.85604161,
+  (float)0.85264009, (float)0.84920216, (float)0.84572780, (float)0.84221715, (float)0.83867055,
+  (float)0.83508795, (float)0.83146954, (float)0.82781565, (float)0.82412612, (float)0.82040137,
+  (float)0.81664157, (float)0.81284660, (float)0.80901700, (float)0.80515265, (float)0.80125374,
+  (float)0.79732066, (float)0.79335332, (float)0.78935200, (float)0.78531694, (float)0.78124815,
+  (float)0.77714586, (float)0.77301049, (float)0.76884180, (float)0.76464021, (float)0.76040596,
+  (float)0.75613904, (float)0.75183970, (float)0.74750835, (float)0.74314481, (float)0.73874938,
+  (float)0.73432249, (float)0.72986400, (float)0.72537428, (float)0.72085363, (float)0.71630186,
+  (float)0.71171951, (float)0.70710677, (float)0.70246363, (float)0.69779032, (float)0.69308734,
+  (float)0.68835449, (float)0.68359220, (float)0.67880070, (float)0.67398006, (float)0.66913044,
+  (float)0.66425240, (float)0.65934575, (float)0.65441096, (float)0.64944804, (float)0.64445722,
+  (float)0.63943905, (float)0.63439327, (float)0.62932026, (float)0.62422055, (float)0.61909389,
+  (float)0.61394072, (float)0.60876143, (float)0.60355592, (float)0.59832448, (float)0.59306765,
+  (float)0.58778518, (float)0.58247757, (float)0.57714522, (float)0.57178789, (float)0.56640613,
+  (float)0.56100023, (float)0.55557019, (float)0.55011630, (float)0.54463905, (float)0.53913826,
+  (float)0.53361434, (float)0.52806783, (float)0.52249849, (float)0.51690674, (float)0.51129305,
+  (float)0.50565726, (float)0.50000006, (float)0.49432117, (float)0.48862115, (float)0.48290038,
+  (float)0.47715873, (float)0.47139663, (float)0.46561456, (float)0.45981231, (float)0.45399037,
+  (float)0.44814920, (float)0.44228864, (float)0.43640912, (float)0.43051112, (float)0.42459446,
+  (float)0.41865960, (float)0.41270703, (float)0.40673658, (float)0.40074870, (float)0.39474386,
+  (float)0.38872188, (float)0.38268328, (float)0.37662849, (float)0.37055734, (float)0.36447033,
+  (float)0.35836792, (float)0.35224995, (float)0.34611690, (float)0.33996922, (float)0.33380675,
+  (float)0.32763001, (float)0.32143945, (float)0.31523487, (float)0.30901679, (float)0.30278572,
+  (float)0.29654145, (float)0.29028472, (float)0.28401530, (float)0.27773371, (float)0.27144048,
+  (float)0.26513538, (float)0.25881892, (float)0.25249159, (float)0.24615324, (float)0.23980433,
+  (float)0.23344538, (float)0.22707619, (float)0.22069728, (float)0.21430916, (float)0.20791161,
+  (float)0.20150517, (float)0.19509031, (float)0.18866688, (float)0.18223536, (float)0.17579627,
+  (float)0.16934940, (float)0.16289529, (float)0.15643445, (float)0.14996666, (float)0.14349243,
+  (float)0.13701232, (float)0.13052608, (float)0.12403426, (float)0.11753736, (float)0.11103519,
+  (float)0.10452849, (float)0.09801710, (float)0.09150149, (float)0.08498220, (float)0.07845904,
+  (float)0.07193252, (float)0.06540315, (float)0.05887074, (float)0.05233581, (float)0.04579888,
+  (float)0.03925974, (float)0.03271893, (float)0.02617695, (float)0.01963361, (float)0.01308943,
+  (float)0.00654493, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000
+};
+
+
+// Hanning window: for 30ms with 1024 fft with symmetric zeros at 16kHz
+static const float kBlocks480w1024[1024] = {
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00327249, (float)0.00654494,
+  (float)0.00981732, (float)0.01308960, (float)0.01636173, (float)0.01963369, (float)0.02290544,
+  (float)0.02617695, (float)0.02944817, (float)0.03271908, (float)0.03598964, (float)0.03925982,
+  (float)0.04252957, (float)0.04579887, (float)0.04906768, (float)0.05233596, (float)0.05560368,
+  (float)0.05887080, (float)0.06213730, (float)0.06540313, (float)0.06866825, (float)0.07193266,
+  (float)0.07519628, (float)0.07845910, (float)0.08172107, (float)0.08498218, (float)0.08824237,
+  (float)0.09150162, (float)0.09475989, (float)0.09801714, (float)0.10127335, (float)0.10452846,
+  (float)0.10778246, (float)0.11103531, (float)0.11428697, (float)0.11753740, (float)0.12078657,
+  (float)0.12403446, (float)0.12728101, (float)0.13052620, (float)0.13376999, (float)0.13701233,
+  (float)0.14025325, (float)0.14349262, (float)0.14673047, (float)0.14996676, (float)0.15320145,
+  (float)0.15643448, (float)0.15966582, (float)0.16289547, (float)0.16612339, (float)0.16934951,
+  (float)0.17257382, (float)0.17579629, (float)0.17901687, (float)0.18223552, (float)0.18545224,
+  (float)0.18866697, (float)0.19187967, (float)0.19509032, (float)0.19829889, (float)0.20150533,
+  (float)0.20470962, (float)0.20791170, (float)0.21111156, (float)0.21430916, (float)0.21750447,
+  (float)0.22069745, (float)0.22388805, (float)0.22707628, (float)0.23026206, (float)0.23344538,
+  (float)0.23662618, (float)0.23980446, (float)0.24298020, (float)0.24615330, (float)0.24932377,
+  (float)0.25249159, (float)0.25565669, (float)0.25881904, (float)0.26197866, (float)0.26513544,
+  (float)0.26828939, (float)0.27144045, (float)0.27458861, (float)0.27773386, (float)0.28087610,
+  (float)0.28401536, (float)0.28715158, (float)0.29028466, (float)0.29341471, (float)0.29654160,
+  (float)0.29966527, (float)0.30278578, (float)0.30590302, (float)0.30901700, (float)0.31212768,
+  (float)0.31523499, (float)0.31833893, (float)0.32143945, (float)0.32453656, (float)0.32763019,
+  (float)0.33072028, (float)0.33380687, (float)0.33688986, (float)0.33996925, (float)0.34304500,
+  (float)0.34611708, (float)0.34918544, (float)0.35225007, (float)0.35531089, (float)0.35836795,
+  (float)0.36142117, (float)0.36447051, (float)0.36751595, (float)0.37055743, (float)0.37359497,
+  (float)0.37662852, (float)0.37965801, (float)0.38268346, (float)0.38570479, (float)0.38872197,
+  (float)0.39173502, (float)0.39474389, (float)0.39774847, (float)0.40074885, (float)0.40374491,
+  (float)0.40673664, (float)0.40972406, (float)0.41270703, (float)0.41568562, (float)0.41865975,
+  (float)0.42162940, (float)0.42459452, (float)0.42755508, (float)0.43051112, (float)0.43346250,
+  (float)0.43640924, (float)0.43935132, (float)0.44228873, (float)0.44522133, (float)0.44814920,
+  (float)0.45107228, (float)0.45399052, (float)0.45690390, (float)0.45981237, (float)0.46271592,
+  (float)0.46561453, (float)0.46850815, (float)0.47139674, (float)0.47428030, (float)0.47715878,
+  (float)0.48003215, (float)0.48290035, (float)0.48576337, (float)0.48862126, (float)0.49147385,
+  (float)0.49432120, (float)0.49716330, (float)0.50000000, (float)0.50283140, (float)0.50565743,
+  (float)0.50847799, (float)0.51129311, (float)0.51410276, (float)0.51690692, (float)0.51970553,
+  (float)0.52249855, (float)0.52528602, (float)0.52806789, (float)0.53084403, (float)0.53361452,
+  (float)0.53637928, (float)0.53913832, (float)0.54189163, (float)0.54463905, (float)0.54738063,
+  (float)0.55011642, (float)0.55284631, (float)0.55557024, (float)0.55828828, (float)0.56100029,
+  (float)0.56370628, (float)0.56640625, (float)0.56910014, (float)0.57178795, (float)0.57446963,
+  (float)0.57714522, (float)0.57981455, (float)0.58247769, (float)0.58513463, (float)0.58778524,
+  (float)0.59042960, (float)0.59306765, (float)0.59569931, (float)0.59832460, (float)0.60094351,
+  (float)0.60355598, (float)0.60616195, (float)0.60876143, (float)0.61135441, (float)0.61394083,
+  (float)0.61652070, (float)0.61909395, (float)0.62166059, (float)0.62422055, (float)0.62677383,
+  (float)0.62932038, (float)0.63186020, (float)0.63439333, (float)0.63691956, (float)0.63943899,
+  (float)0.64195162, (float)0.64445734, (float)0.64695615, (float)0.64944810, (float)0.65193301,
+  (float)0.65441096, (float)0.65688187, (float)0.65934587, (float)0.66180271, (float)0.66425246,
+  (float)0.66669512, (float)0.66913062, (float)0.67155898, (float)0.67398012, (float)0.67639405,
+  (float)0.67880076, (float)0.68120021, (float)0.68359232, (float)0.68597710, (float)0.68835455,
+  (float)0.69072467, (float)0.69308740, (float)0.69544262, (float)0.69779050, (float)0.70013082,
+  (float)0.70246369, (float)0.70478904, (float)0.70710677, (float)0.70941699, (float)0.71171963,
+  (float)0.71401459, (float)0.71630198, (float)0.71858168, (float)0.72085363, (float)0.72311789,
+  (float)0.72537440, (float)0.72762316, (float)0.72986406, (float)0.73209721, (float)0.73432255,
+  (float)0.73653996, (float)0.73874950, (float)0.74095118, (float)0.74314487, (float)0.74533057,
+  (float)0.74750835, (float)0.74967808, (float)0.75183982, (float)0.75399351, (float)0.75613910,
+  (float)0.75827658, (float)0.76040596, (float)0.76252723, (float)0.76464027, (float)0.76674515,
+  (float)0.76884186, (float)0.77093029, (float)0.77301043, (float)0.77508241, (float)0.77714598,
+  (float)0.77920127, (float)0.78124821, (float)0.78328675, (float)0.78531694, (float)0.78733873,
+  (float)0.78935206, (float)0.79135692, (float)0.79335338, (float)0.79534125, (float)0.79732066,
+  (float)0.79929149, (float)0.80125386, (float)0.80320752, (float)0.80515265, (float)0.80708915,
+  (float)0.80901700, (float)0.81093621, (float)0.81284672, (float)0.81474853, (float)0.81664157,
+  (float)0.81852591, (float)0.82040149, (float)0.82226825, (float)0.82412618, (float)0.82597536,
+  (float)0.82781565, (float)0.82964706, (float)0.83146966, (float)0.83328325, (float)0.83508795,
+  (float)0.83688378, (float)0.83867061, (float)0.84044838, (float)0.84221727, (float)0.84397703,
+  (float)0.84572780, (float)0.84746957, (float)0.84920216, (float)0.85092574, (float)0.85264021,
+  (float)0.85434544, (float)0.85604161, (float)0.85772866, (float)0.85940641, (float)0.86107504,
+  (float)0.86273444, (float)0.86438453, (float)0.86602545, (float)0.86765707, (float)0.86927933,
+  (float)0.87089235, (float)0.87249607, (float)0.87409031, (float)0.87567532, (float)0.87725097,
+  (float)0.87881714, (float)0.88037390, (float)0.88192129, (float)0.88345921, (float)0.88498765,
+  (float)0.88650668, (float)0.88801610, (float)0.88951612, (float)0.89100653, (float)0.89248741,
+  (float)0.89395881, (float)0.89542055, (float)0.89687276, (float)0.89831537, (float)0.89974827,
+  (float)0.90117162, (float)0.90258533, (float)0.90398932, (float)0.90538365, (float)0.90676826,
+  (float)0.90814316, (float)0.90950841, (float)0.91086388, (float)0.91220951, (float)0.91354549,
+  (float)0.91487163, (float)0.91618794, (float)0.91749454, (float)0.91879123, (float)0.92007810,
+  (float)0.92135513, (float)0.92262226, (float)0.92387950, (float)0.92512691, (float)0.92636442,
+  (float)0.92759192, (float)0.92880958, (float)0.93001723, (float)0.93121493, (float)0.93240267,
+  (float)0.93358046, (float)0.93474817, (float)0.93590593, (float)0.93705362, (float)0.93819135,
+  (float)0.93931901, (float)0.94043654, (float)0.94154406, (float)0.94264150, (float)0.94372880,
+  (float)0.94480604, (float)0.94587320, (float)0.94693011, (float)0.94797695, (float)0.94901365,
+  (float)0.95004016, (float)0.95105654, (float)0.95206273, (float)0.95305866, (float)0.95404440,
+  (float)0.95501995, (float)0.95598525, (float)0.95694035, (float)0.95788521, (float)0.95881975,
+  (float)0.95974404, (float)0.96065807, (float)0.96156180, (float)0.96245527, (float)0.96333838,
+  (float)0.96421117, (float)0.96507370, (float)0.96592581, (float)0.96676767, (float)0.96759909,
+  (float)0.96842021, (float)0.96923089, (float)0.97003126, (float)0.97082120, (float)0.97160077,
+  (float)0.97236991, (float)0.97312868, (float)0.97387701, (float)0.97461486, (float)0.97534233,
+  (float)0.97605932, (float)0.97676587, (float)0.97746199, (float)0.97814763, (float)0.97882277,
+  (float)0.97948742, (float)0.98014158, (float)0.98078531, (float)0.98141843, (float)0.98204112,
+  (float)0.98265332, (float)0.98325491, (float)0.98384601, (float)0.98442656, (float)0.98499662,
+  (float)0.98555607, (float)0.98610497, (float)0.98664331, (float)0.98717111, (float)0.98768836,
+  (float)0.98819500, (float)0.98869103, (float)0.98917651, (float)0.98965138, (float)0.99011570,
+  (float)0.99056935, (float)0.99101239, (float)0.99144489, (float)0.99186671, (float)0.99227792,
+  (float)0.99267852, (float)0.99306846, (float)0.99344778, (float)0.99381649, (float)0.99417448,
+  (float)0.99452192, (float)0.99485862, (float)0.99518472, (float)0.99550015, (float)0.99580491,
+  (float)0.99609905, (float)0.99638247, (float)0.99665523, (float)0.99691731, (float)0.99716878,
+  (float)0.99740952, (float)0.99763954, (float)0.99785894, (float)0.99806762, (float)0.99826562,
+  (float)0.99845290, (float)0.99862951, (float)0.99879545, (float)0.99895066, (float)0.99909520,
+  (float)0.99922901, (float)0.99935216, (float)0.99946457, (float)0.99956632, (float)0.99965733,
+  (float)0.99973762, (float)0.99980724, (float)0.99986613, (float)0.99991435, (float)0.99995178,
+  (float)0.99997860, (float)0.99999464, (float)1.00000000, (float)0.99999464, (float)0.99997860,
+  (float)0.99995178, (float)0.99991435, (float)0.99986613, (float)0.99980724, (float)0.99973762,
+  (float)0.99965733, (float)0.99956632, (float)0.99946457, (float)0.99935216, (float)0.99922901,
+  (float)0.99909520, (float)0.99895066, (float)0.99879545, (float)0.99862951, (float)0.99845290,
+  (float)0.99826562, (float)0.99806762, (float)0.99785894, (float)0.99763954, (float)0.99740946,
+  (float)0.99716872, (float)0.99691731, (float)0.99665523, (float)0.99638247, (float)0.99609905,
+  (float)0.99580491, (float)0.99550015, (float)0.99518472, (float)0.99485862, (float)0.99452192,
+  (float)0.99417448, (float)0.99381644, (float)0.99344778, (float)0.99306846, (float)0.99267852,
+  (float)0.99227792, (float)0.99186671, (float)0.99144489, (float)0.99101239, (float)0.99056935,
+  (float)0.99011564, (float)0.98965138, (float)0.98917651, (float)0.98869103, (float)0.98819494,
+  (float)0.98768836, (float)0.98717111, (float)0.98664331, (float)0.98610497, (float)0.98555607,
+  (float)0.98499656, (float)0.98442656, (float)0.98384601, (float)0.98325491, (float)0.98265326,
+  (float)0.98204112, (float)0.98141843, (float)0.98078525, (float)0.98014158, (float)0.97948742,
+  (float)0.97882277, (float)0.97814757, (float)0.97746193, (float)0.97676587, (float)0.97605932,
+  (float)0.97534227, (float)0.97461486, (float)0.97387695, (float)0.97312862, (float)0.97236991,
+  (float)0.97160077, (float)0.97082120, (float)0.97003126, (float)0.96923089, (float)0.96842015,
+  (float)0.96759909, (float)0.96676761, (float)0.96592581, (float)0.96507365, (float)0.96421117,
+  (float)0.96333838, (float)0.96245521, (float)0.96156180, (float)0.96065807, (float)0.95974404,
+  (float)0.95881969, (float)0.95788515, (float)0.95694029, (float)0.95598525, (float)0.95501995,
+  (float)0.95404440, (float)0.95305860, (float)0.95206267, (float)0.95105648, (float)0.95004016,
+  (float)0.94901365, (float)0.94797695, (float)0.94693011, (float)0.94587314, (float)0.94480604,
+  (float)0.94372880, (float)0.94264150, (float)0.94154406, (float)0.94043654, (float)0.93931895,
+  (float)0.93819129, (float)0.93705362, (float)0.93590593, (float)0.93474817, (float)0.93358046,
+  (float)0.93240267, (float)0.93121493, (float)0.93001723, (float)0.92880952, (float)0.92759192,
+  (float)0.92636436, (float)0.92512691, (float)0.92387950, (float)0.92262226, (float)0.92135507,
+  (float)0.92007804, (float)0.91879123, (float)0.91749448, (float)0.91618794, (float)0.91487157,
+  (float)0.91354543, (float)0.91220951, (float)0.91086382, (float)0.90950835, (float)0.90814310,
+  (float)0.90676820, (float)0.90538365, (float)0.90398932, (float)0.90258527, (float)0.90117157,
+  (float)0.89974827, (float)0.89831525, (float)0.89687276, (float)0.89542055, (float)0.89395875,
+  (float)0.89248741, (float)0.89100647, (float)0.88951600, (float)0.88801610, (float)0.88650662,
+  (float)0.88498759, (float)0.88345915, (float)0.88192123, (float)0.88037384, (float)0.87881714,
+  (float)0.87725091, (float)0.87567532, (float)0.87409031, (float)0.87249595, (float)0.87089223,
+  (float)0.86927933, (float)0.86765701, (float)0.86602539, (float)0.86438447, (float)0.86273432,
+  (float)0.86107504, (float)0.85940641, (float)0.85772860, (float)0.85604161, (float)0.85434544,
+  (float)0.85264009, (float)0.85092574, (float)0.84920216, (float)0.84746951, (float)0.84572780,
+  (float)0.84397697, (float)0.84221715, (float)0.84044844, (float)0.83867055, (float)0.83688372,
+  (float)0.83508795, (float)0.83328319, (float)0.83146954, (float)0.82964706, (float)0.82781565,
+  (float)0.82597530, (float)0.82412612, (float)0.82226813, (float)0.82040137, (float)0.81852591,
+  (float)0.81664157, (float)0.81474847, (float)0.81284660, (float)0.81093609, (float)0.80901700,
+  (float)0.80708915, (float)0.80515265, (float)0.80320752, (float)0.80125374, (float)0.79929143,
+  (float)0.79732066, (float)0.79534125, (float)0.79335332, (float)0.79135686, (float)0.78935200,
+  (float)0.78733861, (float)0.78531694, (float)0.78328675, (float)0.78124815, (float)0.77920121,
+  (float)0.77714586, (float)0.77508223, (float)0.77301049, (float)0.77093029, (float)0.76884180,
+  (float)0.76674509, (float)0.76464021, (float)0.76252711, (float)0.76040596, (float)0.75827658,
+  (float)0.75613904, (float)0.75399339, (float)0.75183970, (float)0.74967796, (float)0.74750835,
+  (float)0.74533057, (float)0.74314481, (float)0.74095106, (float)0.73874938, (float)0.73653996,
+  (float)0.73432249, (float)0.73209721, (float)0.72986400, (float)0.72762305, (float)0.72537428,
+  (float)0.72311789, (float)0.72085363, (float)0.71858162, (float)0.71630186, (float)0.71401453,
+  (float)0.71171951, (float)0.70941705, (float)0.70710677, (float)0.70478898, (float)0.70246363,
+  (float)0.70013070, (float)0.69779032, (float)0.69544268, (float)0.69308734, (float)0.69072461,
+  (float)0.68835449, (float)0.68597704, (float)0.68359220, (float)0.68120021, (float)0.67880070,
+  (float)0.67639399, (float)0.67398006, (float)0.67155886, (float)0.66913044, (float)0.66669512,
+  (float)0.66425240, (float)0.66180259, (float)0.65934575, (float)0.65688181, (float)0.65441096,
+  (float)0.65193301, (float)0.64944804, (float)0.64695609, (float)0.64445722, (float)0.64195150,
+  (float)0.63943905, (float)0.63691956, (float)0.63439327, (float)0.63186014, (float)0.62932026,
+  (float)0.62677372, (float)0.62422055, (float)0.62166059, (float)0.61909389, (float)0.61652064,
+  (float)0.61394072, (float)0.61135429, (float)0.60876143, (float)0.60616189, (float)0.60355592,
+  (float)0.60094339, (float)0.59832448, (float)0.59569913, (float)0.59306765, (float)0.59042960,
+  (float)0.58778518, (float)0.58513451, (float)0.58247757, (float)0.57981461, (float)0.57714522,
+  (float)0.57446963, (float)0.57178789, (float)0.56910002, (float)0.56640613, (float)0.56370628,
+  (float)0.56100023, (float)0.55828822, (float)0.55557019, (float)0.55284619, (float)0.55011630,
+  (float)0.54738069, (float)0.54463905, (float)0.54189152, (float)0.53913826, (float)0.53637916,
+  (float)0.53361434, (float)0.53084403, (float)0.52806783, (float)0.52528596, (float)0.52249849,
+  (float)0.51970541, (float)0.51690674, (float)0.51410276, (float)0.51129305, (float)0.50847787,
+  (float)0.50565726, (float)0.50283122, (float)0.50000006, (float)0.49716327, (float)0.49432117,
+  (float)0.49147379, (float)0.48862115, (float)0.48576325, (float)0.48290038, (float)0.48003212,
+  (float)0.47715873, (float)0.47428021, (float)0.47139663, (float)0.46850798, (float)0.46561456,
+  (float)0.46271589, (float)0.45981231, (float)0.45690379, (float)0.45399037, (float)0.45107210,
+  (float)0.44814920, (float)0.44522130, (float)0.44228864, (float)0.43935123, (float)0.43640912,
+  (float)0.43346232, (float)0.43051112, (float)0.42755505, (float)0.42459446, (float)0.42162928,
+  (float)0.41865960, (float)0.41568545, (float)0.41270703, (float)0.40972400, (float)0.40673658,
+  (float)0.40374479, (float)0.40074870, (float)0.39774850, (float)0.39474386, (float)0.39173496,
+  (float)0.38872188, (float)0.38570464, (float)0.38268328, (float)0.37965804, (float)0.37662849,
+  (float)0.37359491, (float)0.37055734, (float)0.36751580, (float)0.36447033, (float)0.36142117,
+  (float)0.35836792, (float)0.35531086, (float)0.35224995, (float)0.34918529, (float)0.34611690,
+  (float)0.34304500, (float)0.33996922, (float)0.33688980, (float)0.33380675, (float)0.33072016,
+  (float)0.32763001, (float)0.32453656, (float)0.32143945, (float)0.31833887, (float)0.31523487,
+  (float)0.31212750, (float)0.30901679, (float)0.30590302, (float)0.30278572, (float)0.29966521,
+  (float)0.29654145, (float)0.29341453, (float)0.29028472, (float)0.28715155, (float)0.28401530,
+  (float)0.28087601, (float)0.27773371, (float)0.27458847, (float)0.27144048, (float)0.26828936,
+  (float)0.26513538, (float)0.26197854, (float)0.25881892, (float)0.25565651, (float)0.25249159,
+  (float)0.24932374, (float)0.24615324, (float)0.24298008, (float)0.23980433, (float)0.23662600,
+  (float)0.23344538, (float)0.23026201, (float)0.22707619, (float)0.22388794, (float)0.22069728,
+  (float)0.21750426, (float)0.21430916, (float)0.21111152, (float)0.20791161, (float)0.20470949,
+  (float)0.20150517, (float)0.19829892, (float)0.19509031, (float)0.19187963, (float)0.18866688,
+  (float)0.18545210, (float)0.18223536, (float)0.17901689, (float)0.17579627, (float)0.17257376,
+  (float)0.16934940, (float)0.16612324, (float)0.16289529, (float)0.15966584, (float)0.15643445,
+  (float)0.15320137, (float)0.14996666, (float)0.14673033, (float)0.14349243, (float)0.14025325,
+  (float)0.13701232, (float)0.13376991, (float)0.13052608, (float)0.12728085, (float)0.12403426,
+  (float)0.12078657, (float)0.11753736, (float)0.11428688, (float)0.11103519, (float)0.10778230,
+  (float)0.10452849, (float)0.10127334, (float)0.09801710, (float)0.09475980, (float)0.09150149,
+  (float)0.08824220, (float)0.08498220, (float)0.08172106, (float)0.07845904, (float)0.07519618,
+  (float)0.07193252, (float)0.06866808, (float)0.06540315, (float)0.06213728, (float)0.05887074,
+  (float)0.05560357, (float)0.05233581, (float)0.04906749, (float)0.04579888, (float)0.04252954,
+  (float)0.03925974, (float)0.03598953, (float)0.03271893, (float)0.02944798, (float)0.02617695,
+  (float)0.02290541, (float)0.01963361, (float)0.01636161, (float)0.01308943, (float)0.00981712,
+  (float)0.00654493, (float)0.00327244, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000
+};
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
diff --git a/src/modules/audio_processing/main/source/processing_component.cc b/src/modules/audio_processing/processing_component.cc
similarity index 100%
rename from src/modules/audio_processing/main/source/processing_component.cc
rename to src/modules/audio_processing/processing_component.cc
diff --git a/src/modules/audio_processing/main/source/processing_component.h b/src/modules/audio_processing/processing_component.h
similarity index 90%
rename from src/modules/audio_processing/main/source/processing_component.h
rename to src/modules/audio_processing/processing_component.h
index 3d8a02b..3af0c4d 100644
--- a/src/modules/audio_processing/main/source/processing_component.h
+++ b/src/modules/audio_processing/processing_component.h
@@ -18,16 +18,6 @@
 namespace webrtc {
 class AudioProcessingImpl;
 
-/*template <class T>
-class ComponentHandle {
-  public:
-    ComponentHandle();
-    virtual ~ComponentHandle();
-
-    virtual int Create() = 0;
-    virtual T* ptr() const = 0;
-};*/
-
 class ProcessingComponent {
  public:
   explicit ProcessingComponent(const AudioProcessingImpl* apm);
@@ -37,10 +27,11 @@
   virtual int Destroy();
   virtual int get_version(char* version, int version_len_bytes) const = 0;
 
+  bool is_component_enabled() const;
+
  protected:
   virtual int Configure();
   int EnableComponent(bool enable);
-  bool is_component_enabled() const;
   void* handle(int index) const;
   int num_handles() const;
 
diff --git a/src/modules/audio_processing/main/source/splitting_filter.cc b/src/modules/audio_processing/splitting_filter.cc
similarity index 100%
rename from src/modules/audio_processing/main/source/splitting_filter.cc
rename to src/modules/audio_processing/splitting_filter.cc
diff --git a/src/modules/audio_processing/main/source/splitting_filter.h b/src/modules/audio_processing/splitting_filter.h
similarity index 100%
rename from src/modules/audio_processing/main/source/splitting_filter.h
rename to src/modules/audio_processing/splitting_filter.h
diff --git a/src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml b/src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml
rename to src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
diff --git a/src/modules/audio_processing/main/test/android/apmtest/default.properties b/src/modules/audio_processing/test/android/apmtest/default.properties
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/default.properties
rename to src/modules/audio_processing/test/android/apmtest/default.properties
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk b/src/modules/audio_processing/test/android/apmtest/jni/Android.mk
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk
rename to src/modules/audio_processing/test/android/apmtest/jni/Android.mk
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk b/src/modules/audio_processing/test/android/apmtest/jni/Application.mk
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk
rename to src/modules/audio_processing/test/android/apmtest/jni/Application.mk
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/main.c b/src/modules/audio_processing/test/android/apmtest/jni/main.c
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/jni/main.c
rename to src/modules/audio_processing/test/android/apmtest/jni/main.c
diff --git a/src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml b/src/modules/audio_processing/test/android/apmtest/res/values/strings.xml
similarity index 100%
rename from src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml
rename to src/modules/audio_processing/test/android/apmtest/res/values/strings.xml
diff --git a/src/modules/audio_processing/main/test/process_test/apmtest.m b/src/modules/audio_processing/test/apmtest.m
similarity index 90%
rename from src/modules/audio_processing/main/test/process_test/apmtest.m
rename to src/modules/audio_processing/test/apmtest.m
index 6152bb5..3172cd1 100644
--- a/src/modules/audio_processing/main/test/process_test/apmtest.m
+++ b/src/modules/audio_processing/test/apmtest.m
@@ -1,4 +1,4 @@
-function apmtest(task, testname, casenumber, legacy)
+function apmtest(task, testname, filepath, casenumber, legacy)
 %APMTEST is a tool to process APM file sets and easily display the output.
 %   APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:
 %     'test'  Processes the files to produce test output.
@@ -17,27 +17,33 @@
 %     'ns'    The NS test set.
 %     'vad'   The VAD test set.
 %
+%   FILEPATH specifies the path to the test data files.
+%
 %   CASENUMBER can be used to select a single test case. Omit CASENUMBER,
 %   or set to zero, to use all test cases.
 %
 
-if nargin < 4
+if nargin < 5 || isempty(legacy)
   % Set to true to run old VQE recordings.
   legacy = false;
 end
 
-if nargin < 3
+if nargin < 4 || isempty(casenumber)
   casenumber = 0;
 end
 
-if nargin < 2
-  task = 'test';
+if nargin < 3 || isempty(filepath)
+  filepath = 'data/';
 end
 
-if nargin < 1
+if nargin < 2 || isempty(testname)
   testname = 'all';
 end
 
+if nargin < 1 || isempty(task)
+  task = 'test';
+end
+
 if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
   error(['TASK ' task ' is not recognized']);
 end
@@ -46,17 +52,10 @@
   error(['CASENUMBER must be specified for TASK ' task]);
 end
 
-filepath = 'data/';
 inpath = [filepath 'input/'];
 outpath = [filepath 'output/'];
 refpath = [filepath 'reference/'];
 
-% Temporary
-if legacy
-  refpath = [filepath 'output/'];
-  outpath = [filepath 'reference/'];
-end
-
 if strcmp(testname, 'all')
   tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
 else
@@ -64,7 +63,7 @@
 end
 
 if legacy
-  progname = '/usr/local/google/p4/dev/depot/test';
+  progname = './test';
 else
   progname = './process_test';
 end
@@ -127,24 +126,24 @@
     error(['TESTNAME ' tests{i} ' is not recognized']);
   end
 
-  inpath = [inpath testdir];
-  outpath = [outpath testdir];
-  refpath = [refpath testdir];
+  inpathtest = [inpath testdir];
+  outpathtest = [outpath testdir];
+  refpathtest = [refpath testdir];
 
-  if ~exist(inpath,'dir')
-    error(['Input directory ' inpath ' does not exist']);
+  if ~exist(inpathtest,'dir')
+    error(['Input directory ' inpathtest ' does not exist']);
   end
 
-  if ~exist(refpath,'dir')
-    warning(['Reference directory ' refpath ' does not exist']);
+  if ~exist(refpathtest,'dir')
+    warning(['Reference directory ' refpathtest ' does not exist']);
   end
 
-  [status, errMsg] = mkdir(outpath);
+  [status, errMsg] = mkdir(outpathtest);
   if (status == 0)
     error(errMsg);
   end
 
-  [nErr, nCases] = recurseDir(inpath, outpath, refpath, outfile, ...
+  [nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...
       progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
 
   if strcmp(task, 'test') || strcmp(task, 'show')
@@ -221,13 +220,13 @@
 
       if exist([inpath 'vqeEvent.dat'])
         system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
-      elseif exist([inpath 'apm_event.day'])
+      elseif exist([inpath 'apm_event.dat'])
         system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
       end
 
       if exist([inpath 'vqeBuf.dat'])
         system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
-      elseif exist([inpath 'apm_delay.day'])
+      elseif exist([inpath 'apm_delay.dat'])
         system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
       end
 
@@ -296,10 +295,6 @@
               diffvector);
           %spclab(fs, diffvector);
         end
-
-        if vadTest == 1
-          spclab([refpath vadoutfile], [outpath vadoutfile]);
-        end
       end
     end
   end
diff --git a/src/modules/audio_processing/test/process_test.cc b/src/modules/audio_processing/test/process_test.cc
new file mode 100644
index 0000000..2023ddb
--- /dev/null
+++ b/src/modules/audio_processing/test/process_test.cc
@@ -0,0 +1,964 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <sys/stat.h>
+#endif
+
+#include "gtest/gtest.h"
+
+#include "audio_processing.h"
+#include "cpu_features_wrapper.h"
+#include "module_common_types.h"
+#include "scoped_ptr.h"
+#include "tick_util.h"
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+
+using webrtc::AudioFrame;
+using webrtc::AudioProcessing;
+using webrtc::EchoCancellation;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+using webrtc::scoped_array;
+using webrtc::TickInterval;
+using webrtc::TickTime;
+
+using webrtc::audioproc::Event;
+using webrtc::audioproc::Init;
+using webrtc::audioproc::ReverseStream;
+using webrtc::audioproc::Stream;
+
+namespace {
+// Returns true on success, false on error or end-of-file.
+bool ReadMessageFromFile(FILE* file,
+                        ::google::protobuf::MessageLite* msg) {
+  // The "wire format" for the size is little-endian.
+  // Assume process_test is running on a little-endian machine.
+  int32_t size = 0;
+  if (fread(&size, sizeof(int32_t), 1, file) != 1) {
+    return false;
+  }
+  if (size <= 0) {
+    return false;
+  }
+  const size_t usize = static_cast<size_t>(size);
+
+  scoped_array<char> array(new char[usize]);
+  if (fread(array.get(), sizeof(char), usize, file) != usize) {
+    return false;
+  }
+
+  msg->Clear();
+  return msg->ParseFromArray(array.get(), usize);
+}
+
+void PrintStat(const AudioProcessing::Statistic& stat) {
+  printf("%d, %d, %d\n", stat.average,
+                         stat.maximum,
+                         stat.minimum);
+}
+
+void usage() {
+  printf(
+  "Usage: process_test [options] [-pb PROTOBUF_FILE]\n"
+  "  [-ir REVERSE_FILE] [-i PRIMARY_FILE] [-o OUT_FILE]\n");
+  printf(
+  "process_test is a test application for AudioProcessing.\n\n"
+  "When a protobuf debug file is available, specify it with -pb.\n"
+  "Alternately, when -ir or -i is used, the specified files will be\n"
+  "processed directly in a simulation mode. Otherwise the full set of\n"
+  "legacy test files is expected to be present in the working directory.\n");
+  printf("\n");
+  printf("Options\n");
+  printf("General configuration (only used for the simulation mode):\n");
+  printf("  -fs SAMPLE_RATE_HZ\n");
+  printf("  -ch CHANNELS_IN CHANNELS_OUT\n");
+  printf("  -rch REVERSE_CHANNELS\n");
+  printf("\n");
+  printf("Component configuration:\n");
+  printf(
+  "All components are disabled by default. Each block below begins with a\n"
+  "flag to enable the component with default settings. The subsequent flags\n"
+  "in the block are used to provide configuration settings.\n");
+  printf("\n  -aec     Echo cancellation\n");
+  printf("  --drift_compensation\n");
+  printf("  --no_drift_compensation\n");
+  printf("  --no_echo_metrics\n");
+  printf("  --no_delay_logging\n");
+  printf("\n  -aecm    Echo control mobile\n");
+  printf("  --aecm_echo_path_in_file FILE\n");
+  printf("  --aecm_echo_path_out_file FILE\n");
+  printf("\n  -agc     Gain control\n");
+  printf("  --analog\n");
+  printf("  --adaptive_digital\n");
+  printf("  --fixed_digital\n");
+  printf("  --target_level LEVEL\n");
+  printf("  --compression_gain GAIN\n");
+  printf("  --limiter\n");
+  printf("  --no_limiter\n");
+  printf("\n  -hpf     High pass filter\n");
+  printf("\n  -ns      Noise suppression\n");
+  printf("  --ns_low\n");
+  printf("  --ns_moderate\n");
+  printf("  --ns_high\n");
+  printf("  --ns_very_high\n");
+  printf("\n  -vad     Voice activity detection\n");
+  printf("  --vad_out_file FILE\n");
+  printf("\n Level metrics (enabled by default)\n");
+  printf("  --no_level_metrics\n");
+  printf("\n");
+  printf("Modifiers:\n");
+  printf("  --noasm            Disable SSE optimization.\n");
+  printf("  --delay DELAY      Add DELAY ms to input value.\n");
+  printf("  --perf             Measure performance.\n");
+  printf("  --quiet            Suppress text output.\n");
+  printf("  --no_progress      Suppress progress.\n");
+  printf("  --debug_file FILE  Dump a debug recording.\n");
+}
+
+// void function for gtest.
+void void_main(int argc, char* argv[]) {
+  if (argc > 1 && strcmp(argv[1], "--help") == 0) {
+    usage();
+    return;
+  }
+
+  if (argc < 2) {
+    printf("Did you mean to run without arguments?\n");
+    printf("Try `process_test --help' for more information.\n\n");
+  }
+
+  AudioProcessing* apm = AudioProcessing::Create(0);
+  ASSERT_TRUE(apm != NULL);
+
+  const char* pb_filename = NULL;
+  const char* far_filename = NULL;
+  const char* near_filename = NULL;
+  const char* out_filename = NULL;
+  const char* vad_out_filename = NULL;
+  const char* aecm_echo_path_in_filename = NULL;
+  const char* aecm_echo_path_out_filename = NULL;
+
+  int32_t sample_rate_hz = 16000;
+  int32_t device_sample_rate_hz = 16000;
+
+  int num_capture_input_channels = 1;
+  int num_capture_output_channels = 1;
+  int num_render_channels = 1;
+
+  int samples_per_channel = sample_rate_hz / 100;
+
+  bool simulating = false;
+  bool perf_testing = false;
+  bool verbose = true;
+  bool progress = true;
+  int extra_delay_ms = 0;
+  //bool interleaved = true;
+
+  ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+  for (int i = 1; i < argc; i++) {
+    if (strcmp(argv[i], "-pb") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify protobuf filename after -pb";
+      pb_filename = argv[i];
+
+    } else if (strcmp(argv[i], "-ir") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after -ir";
+      far_filename = argv[i];
+      simulating = true;
+
+    } else if (strcmp(argv[i], "-i") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after -i";
+      near_filename = argv[i];
+      simulating = true;
+
+    } else if (strcmp(argv[i], "-o") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after -o";
+      out_filename = argv[i];
+
+    } else if (strcmp(argv[i], "-fs") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify sample rate after -fs";
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
+      samples_per_channel = sample_rate_hz / 100;
+
+      ASSERT_EQ(apm->kNoError,
+                apm->set_sample_rate_hz(sample_rate_hz));
+
+    } else if (strcmp(argv[i], "-ch") == 0) {
+      i++;
+      ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
+      i++;
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
+
+      ASSERT_EQ(apm->kNoError,
+                apm->set_num_channels(num_capture_input_channels,
+                                      num_capture_output_channels));
+
+    } else if (strcmp(argv[i], "-rch") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify number of channels after -rch";
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
+
+      ASSERT_EQ(apm->kNoError,
+                apm->set_num_reverse_channels(num_render_channels));
+
+    } else if (strcmp(argv[i], "-aec") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_metrics(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_delay_logging(true));
+
+    } else if (strcmp(argv[i], "--drift_compensation") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      // TODO(ajm): this is enabled in the VQE test app by default. Investigate
+      //            why it can give better performance despite passing zeros.
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_drift_compensation(true));
+    } else if (strcmp(argv[i], "--no_drift_compensation") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_drift_compensation(false));
+
+    } else if (strcmp(argv[i], "--no_echo_metrics") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_metrics(false));
+
+    } else if (strcmp(argv[i], "--no_delay_logging") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_delay_logging(false));
+
+    } else if (strcmp(argv[i], "--no_level_metrics") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(false));
+
+    } else if (strcmp(argv[i], "-aecm") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
+
+    } else if (strcmp(argv[i], "--aecm_echo_path_in_file") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_in_file";
+      aecm_echo_path_in_filename = argv[i];
+
+    } else if (strcmp(argv[i], "--aecm_echo_path_out_file") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_out_file";
+      aecm_echo_path_out_filename = argv[i];
+
+    } else if (strcmp(argv[i], "-agc") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+
+    } else if (strcmp(argv[i], "--analog") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+
+    } else if (strcmp(argv[i], "--adaptive_digital") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+
+    } else if (strcmp(argv[i], "--fixed_digital") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kFixedDigital));
+
+    } else if (strcmp(argv[i], "--target_level") == 0) {
+      i++;
+      int level;
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &level));
+
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_target_level_dbfs(level));
+
+    } else if (strcmp(argv[i], "--compression_gain") == 0) {
+      i++;
+      int gain;
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &gain));
+
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_compression_gain_db(gain));
+
+    } else if (strcmp(argv[i], "--limiter") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->enable_limiter(true));
+
+    } else if (strcmp(argv[i], "--no_limiter") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->enable_limiter(false));
+
+    } else if (strcmp(argv[i], "-hpf") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->high_pass_filter()->Enable(true));
+
+    } else if (strcmp(argv[i], "-ns") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+
+    } else if (strcmp(argv[i], "--ns_low") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->noise_suppression()->set_level(NoiseSuppression::kLow));
+
+    } else if (strcmp(argv[i], "--ns_moderate") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->noise_suppression()->set_level(NoiseSuppression::kModerate));
+
+    } else if (strcmp(argv[i], "--ns_high") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->noise_suppression()->set_level(NoiseSuppression::kHigh));
+
+    } else if (strcmp(argv[i], "--ns_very_high") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->noise_suppression()->set_level(NoiseSuppression::kVeryHigh));
+
+    } else if (strcmp(argv[i], "-vad") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+
+    } else if (strcmp(argv[i], "--vad_out_file") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
+      vad_out_filename = argv[i];
+
+    } else if (strcmp(argv[i], "--noasm") == 0) {
+      WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
+      // We need to reinitialize here if components have already been enabled.
+      ASSERT_EQ(apm->kNoError, apm->Initialize());
+
+    } else if (strcmp(argv[i], "--delay") == 0) {
+      i++;
+      ASSERT_EQ(1, sscanf(argv[i], "%d", &extra_delay_ms));
+
+    } else if (strcmp(argv[i], "--perf") == 0) {
+      perf_testing = true;
+
+    } else if (strcmp(argv[i], "--quiet") == 0) {
+      verbose = false;
+      progress = false;
+
+    } else if (strcmp(argv[i], "--no_progress") == 0) {
+      progress = false;
+
+    } else if (strcmp(argv[i], "--debug_file") == 0) {
+      i++;
+      ASSERT_LT(i, argc) << "Specify filename after --debug_file";
+      ASSERT_EQ(apm->kNoError, apm->StartDebugRecording(argv[i]));
+    } else {
+      FAIL() << "Unrecognized argument " << argv[i];
+    }
+  }
+  // If we're reading a protobuf file, ensure a simulation hasn't also
+  // been requested (which makes no sense...)
+  ASSERT_FALSE(pb_filename && simulating);
+
+  if (verbose) {
+    printf("Sample rate: %d Hz\n", sample_rate_hz);
+    printf("Primary channels: %d (in), %d (out)\n",
+           num_capture_input_channels,
+           num_capture_output_channels);
+    printf("Reverse channels: %d \n", num_render_channels);
+  }
+
+  const char far_file_default[] = "apm_far.pcm";
+  const char near_file_default[] = "apm_near.pcm";
+  const char out_file_default[] = "out.pcm";
+  const char event_filename[] = "apm_event.dat";
+  const char delay_filename[] = "apm_delay.dat";
+  const char drift_filename[] = "apm_drift.dat";
+  const char vad_file_default[] = "vad_out.dat";
+
+  if (!simulating) {
+    far_filename = far_file_default;
+    near_filename = near_file_default;
+  }
+
+  if (!out_filename) {
+    out_filename = out_file_default;
+  }
+
+  if (!vad_out_filename) {
+    vad_out_filename = vad_file_default;
+  }
+
+  FILE* pb_file = NULL;
+  FILE* far_file = NULL;
+  FILE* near_file = NULL;
+  FILE* out_file = NULL;
+  FILE* event_file = NULL;
+  FILE* delay_file = NULL;
+  FILE* drift_file = NULL;
+  FILE* vad_out_file = NULL;
+  FILE* aecm_echo_path_in_file = NULL;
+  FILE* aecm_echo_path_out_file = NULL;
+
+  if (pb_filename) {
+    pb_file = fopen(pb_filename, "rb");
+    ASSERT_TRUE(NULL != pb_file) << "Unable to open protobuf file "
+                                 << pb_filename;
+  } else {
+    if (far_filename) {
+      far_file = fopen(far_filename, "rb");
+      ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
+                                    << far_filename;
+    }
+
+    near_file = fopen(near_filename, "rb");
+    ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
+                                   << near_filename;
+    if (!simulating) {
+      event_file = fopen(event_filename, "rb");
+      ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
+                                      << event_filename;
+
+      delay_file = fopen(delay_filename, "rb");
+      ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
+                                      << delay_filename;
+
+      drift_file = fopen(drift_filename, "rb");
+      ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
+                                      << drift_filename;
+    }
+  }
+
+  out_file = fopen(out_filename, "wb");
+  ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
+                                << out_filename;
+
+  int near_size_bytes = 0;
+  if (pb_file) {
+    struct stat st;
+    stat(pb_filename, &st);
+    // Crude estimate, but should be good enough.
+    near_size_bytes = st.st_size / 3;
+  } else {
+    struct stat st;
+    stat(near_filename, &st);
+    near_size_bytes = st.st_size;
+  }
+
+  if (apm->voice_detection()->is_enabled()) {
+    vad_out_file = fopen(vad_out_filename, "wb");
+    ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
+                                      << vad_out_filename;
+  }
+
+  if (aecm_echo_path_in_filename != NULL) {
+    aecm_echo_path_in_file = fopen(aecm_echo_path_in_filename, "rb");
+    ASSERT_TRUE(NULL != aecm_echo_path_in_file) << "Unable to open file "
+                                                << aecm_echo_path_in_filename;
+
+    const size_t path_size =
+        apm->echo_control_mobile()->echo_path_size_bytes();
+    scoped_array<char> echo_path(new char[path_size]);
+    ASSERT_EQ(path_size, fread(echo_path.get(),
+                               sizeof(char),
+                               path_size,
+                               aecm_echo_path_in_file));
+    EXPECT_EQ(apm->kNoError,
+              apm->echo_control_mobile()->SetEchoPath(echo_path.get(),
+                                                      path_size));
+    fclose(aecm_echo_path_in_file);
+    aecm_echo_path_in_file = NULL;
+  }
+
+  if (aecm_echo_path_out_filename != NULL) {
+    aecm_echo_path_out_file = fopen(aecm_echo_path_out_filename, "wb");
+    ASSERT_TRUE(NULL != aecm_echo_path_out_file) << "Unable to open file "
+                                                 << aecm_echo_path_out_filename;
+  }
+
+  size_t read_count = 0;
+  int reverse_count = 0;
+  int primary_count = 0;
+  int near_read_bytes = 0;
+  TickInterval acc_ticks;
+
+  AudioFrame far_frame;
+  AudioFrame near_frame;
+
+  int delay_ms = 0;
+  int drift_samples = 0;
+  int capture_level = 127;
+  int8_t stream_has_voice = 0;
+
+  TickTime t0 = TickTime::Now();
+  TickTime t1 = t0;
+  WebRtc_Word64 max_time_us = 0;
+  WebRtc_Word64 max_time_reverse_us = 0;
+  WebRtc_Word64 min_time_us = 1e6;
+  WebRtc_Word64 min_time_reverse_us = 1e6;
+
+  // TODO(ajm): Ideally we would refactor this block into separate functions,
+  //            but for now we want to share the variables.
+  if (pb_file) {
+    Event event_msg;
+    while (ReadMessageFromFile(pb_file, &event_msg)) {
+      std::ostringstream trace_stream;
+      trace_stream << "Processed frames: " << reverse_count << " (reverse), "
+                   << primary_count << " (primary)";
+      SCOPED_TRACE(trace_stream.str());
+
+      if (event_msg.type() == Event::INIT) {
+        ASSERT_TRUE(event_msg.has_init());
+        const Init msg = event_msg.init();
+
+        ASSERT_TRUE(msg.has_sample_rate());
+        ASSERT_EQ(apm->kNoError,
+            apm->set_sample_rate_hz(msg.sample_rate()));
+
+        ASSERT_TRUE(msg.has_device_sample_rate());
+        ASSERT_EQ(apm->kNoError,
+                  apm->echo_cancellation()->set_device_sample_rate_hz(
+                      msg.device_sample_rate()));
+
+        ASSERT_TRUE(msg.has_num_input_channels());
+        ASSERT_TRUE(msg.has_num_output_channels());
+        ASSERT_EQ(apm->kNoError,
+            apm->set_num_channels(msg.num_input_channels(),
+                                  msg.num_output_channels()));
+
+        ASSERT_TRUE(msg.has_num_reverse_channels());
+        ASSERT_EQ(apm->kNoError,
+            apm->set_num_reverse_channels(msg.num_reverse_channels()));
+
+        samples_per_channel = msg.sample_rate() / 100;
+        far_frame._frequencyInHz = msg.sample_rate();
+        far_frame._payloadDataLengthInSamples = samples_per_channel;
+        far_frame._audioChannel = msg.num_reverse_channels();
+        near_frame._frequencyInHz = msg.sample_rate();
+        near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+        if (verbose) {
+          printf("Init at frame: %d (primary), %d (reverse)\n",
+              primary_count, reverse_count);
+          printf("  Sample rate: %d Hz\n", msg.sample_rate());
+          printf("  Primary channels: %d (in), %d (out)\n",
+                 msg.num_input_channels(),
+                 msg.num_output_channels());
+          printf("  Reverse channels: %d \n", msg.num_reverse_channels());
+        }
+
+      } else if (event_msg.type() == Event::REVERSE_STREAM) {
+        ASSERT_TRUE(event_msg.has_reverse_stream());
+        const ReverseStream msg = event_msg.reverse_stream();
+        reverse_count++;
+
+        ASSERT_TRUE(msg.has_data());
+        ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
+            far_frame._audioChannel, msg.data().size());
+        memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
+
+        if (perf_testing) {
+          t0 = TickTime::Now();
+        }
+
+        ASSERT_EQ(apm->kNoError,
+                  apm->AnalyzeReverseStream(&far_frame));
+
+        if (perf_testing) {
+          t1 = TickTime::Now();
+          TickInterval tick_diff = t1 - t0;
+          acc_ticks += tick_diff;
+          if (tick_diff.Microseconds() > max_time_reverse_us) {
+            max_time_reverse_us = tick_diff.Microseconds();
+          }
+          if (tick_diff.Microseconds() < min_time_reverse_us) {
+            min_time_reverse_us = tick_diff.Microseconds();
+          }
+        }
+
+      } else if (event_msg.type() == Event::STREAM) {
+        ASSERT_TRUE(event_msg.has_stream());
+        const Stream msg = event_msg.stream();
+        primary_count++;
+
+        // ProcessStream could have changed this for the output frame.
+        near_frame._audioChannel = apm->num_input_channels();
+
+        ASSERT_TRUE(msg.has_input_data());
+        ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
+            near_frame._audioChannel, msg.input_data().size());
+        memcpy(near_frame._payloadData,
+               msg.input_data().data(),
+               msg.input_data().size());
+
+        near_read_bytes += msg.input_data().size();
+        if (progress && primary_count % 100 == 0) {
+          printf("%.0f%% complete\r",
+              (near_read_bytes * 100.0) / near_size_bytes);
+          fflush(stdout);
+        }
+
+        if (perf_testing) {
+          t0 = TickTime::Now();
+        }
+
+        ASSERT_EQ(apm->kNoError,
+                  apm->gain_control()->set_stream_analog_level(msg.level()));
+        ASSERT_EQ(apm->kNoError,
+                  apm->set_stream_delay_ms(msg.delay() + extra_delay_ms));
+        ASSERT_EQ(apm->kNoError,
+            apm->echo_cancellation()->set_stream_drift_samples(msg.drift()));
+
+        int err = apm->ProcessStream(&near_frame);
+        if (err == apm->kBadStreamParameterWarning) {
+          printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
+        }
+        ASSERT_TRUE(err == apm->kNoError ||
+                    err == apm->kBadStreamParameterWarning);
+        ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+
+        capture_level = apm->gain_control()->stream_analog_level();
+
+        stream_has_voice =
+            static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
+        if (vad_out_file != NULL) {
+          ASSERT_EQ(1u, fwrite(&stream_has_voice,
+                               sizeof(stream_has_voice),
+                               1,
+                               vad_out_file));
+        }
+
+        if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
+          ASSERT_EQ(msg.level(), capture_level);
+        }
+
+        if (perf_testing) {
+          t1 = TickTime::Now();
+          TickInterval tick_diff = t1 - t0;
+          acc_ticks += tick_diff;
+          if (tick_diff.Microseconds() > max_time_us) {
+            max_time_us = tick_diff.Microseconds();
+          }
+          if (tick_diff.Microseconds() < min_time_us) {
+            min_time_us = tick_diff.Microseconds();
+          }
+        }
+
+        size_t size = samples_per_channel * near_frame._audioChannel;
+        ASSERT_EQ(size, fwrite(near_frame._payloadData,
+                               sizeof(int16_t),
+                               size,
+                               out_file));
+      }
+    }
+
+    ASSERT_TRUE(feof(pb_file));
+
+  } else {
+    enum Events {
+      kInitializeEvent,
+      kRenderEvent,
+      kCaptureEvent,
+      kResetEventDeprecated
+    };
+    int16_t event = 0;
+    while (simulating || feof(event_file) == 0) {
+      std::ostringstream trace_stream;
+      trace_stream << "Processed frames: " << reverse_count << " (reverse), "
+                   << primary_count << " (primary)";
+      SCOPED_TRACE(trace_stream.str());
+
+      if (simulating) {
+        if (far_file == NULL) {
+          event = kCaptureEvent;
+        } else {
+          if (event == kRenderEvent) {
+            event = kCaptureEvent;
+          } else {
+            event = kRenderEvent;
+          }
+        }
+      } else {
+        read_count = fread(&event, sizeof(event), 1, event_file);
+        if (read_count != 1) {
+          break;
+        }
+      }
+
+      far_frame._frequencyInHz = sample_rate_hz;
+      far_frame._payloadDataLengthInSamples = samples_per_channel;
+      far_frame._audioChannel = num_render_channels;
+      near_frame._frequencyInHz = sample_rate_hz;
+      near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+      if (event == kInitializeEvent || event == kResetEventDeprecated) {
+        ASSERT_EQ(1u,
+            fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
+        samples_per_channel = sample_rate_hz / 100;
+
+        ASSERT_EQ(1u,
+            fread(&device_sample_rate_hz,
+                  sizeof(device_sample_rate_hz),
+                  1,
+                  event_file));
+
+        ASSERT_EQ(apm->kNoError,
+            apm->set_sample_rate_hz(sample_rate_hz));
+
+        ASSERT_EQ(apm->kNoError,
+                  apm->echo_cancellation()->set_device_sample_rate_hz(
+                      device_sample_rate_hz));
+
+        far_frame._frequencyInHz = sample_rate_hz;
+        far_frame._payloadDataLengthInSamples = samples_per_channel;
+        far_frame._audioChannel = num_render_channels;
+        near_frame._frequencyInHz = sample_rate_hz;
+        near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+        if (verbose) {
+          printf("Init at frame: %d (primary), %d (reverse)\n",
+              primary_count, reverse_count);
+          printf("  Sample rate: %d Hz\n", sample_rate_hz);
+        }
+
+      } else if (event == kRenderEvent) {
+        reverse_count++;
+
+        size_t size = samples_per_channel * num_render_channels;
+        read_count = fread(far_frame._payloadData,
+                           sizeof(int16_t),
+                           size,
+                           far_file);
+
+        if (simulating) {
+          if (read_count != size) {
+            // Read an equal amount from the near file to avoid errors due to
+            // not reaching end-of-file.
+            EXPECT_EQ(0, fseek(near_file, read_count * sizeof(int16_t),
+                      SEEK_CUR));
+            break; // This is expected.
+          }
+        } else {
+          ASSERT_EQ(size, read_count);
+        }
+
+        if (perf_testing) {
+          t0 = TickTime::Now();
+        }
+
+        ASSERT_EQ(apm->kNoError,
+                  apm->AnalyzeReverseStream(&far_frame));
+
+        if (perf_testing) {
+          t1 = TickTime::Now();
+          TickInterval tick_diff = t1 - t0;
+          acc_ticks += tick_diff;
+          if (tick_diff.Microseconds() > max_time_reverse_us) {
+            max_time_reverse_us = tick_diff.Microseconds();
+          }
+          if (tick_diff.Microseconds() < min_time_reverse_us) {
+            min_time_reverse_us = tick_diff.Microseconds();
+          }
+        }
+
+      } else if (event == kCaptureEvent) {
+        primary_count++;
+        near_frame._audioChannel = num_capture_input_channels;
+
+        size_t size = samples_per_channel * num_capture_input_channels;
+        read_count = fread(near_frame._payloadData,
+                           sizeof(int16_t),
+                           size,
+                           near_file);
+
+        near_read_bytes += read_count * sizeof(int16_t);
+        if (progress && primary_count % 100 == 0) {
+          printf("%.0f%% complete\r",
+              (near_read_bytes * 100.0) / near_size_bytes);
+          fflush(stdout);
+        }
+        if (simulating) {
+          if (read_count != size) {
+            break; // This is expected.
+          }
+
+          delay_ms = 0;
+          drift_samples = 0;
+        } else {
+          ASSERT_EQ(size, read_count);
+
+          // TODO(ajm): sizeof(delay_ms) for current files?
+          ASSERT_EQ(1u,
+              fread(&delay_ms, 2, 1, delay_file));
+          ASSERT_EQ(1u,
+              fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
+        }
+
+        if (perf_testing) {
+          t0 = TickTime::Now();
+        }
+
+        // TODO(ajm): fake an analog gain while simulating.
+
+        int capture_level_in = capture_level;
+        ASSERT_EQ(apm->kNoError,
+                  apm->gain_control()->set_stream_analog_level(capture_level));
+        ASSERT_EQ(apm->kNoError,
+                  apm->set_stream_delay_ms(delay_ms + extra_delay_ms));
+        ASSERT_EQ(apm->kNoError,
+            apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
+
+        int err = apm->ProcessStream(&near_frame);
+        if (err == apm->kBadStreamParameterWarning) {
+          printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
+        }
+        ASSERT_TRUE(err == apm->kNoError ||
+                    err == apm->kBadStreamParameterWarning);
+        ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+
+        capture_level = apm->gain_control()->stream_analog_level();
+
+        stream_has_voice =
+            static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
+        if (vad_out_file != NULL) {
+          ASSERT_EQ(1u, fwrite(&stream_has_voice,
+                               sizeof(stream_has_voice),
+                               1,
+                               vad_out_file));
+        }
+
+        if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
+          ASSERT_EQ(capture_level_in, capture_level);
+        }
+
+        if (perf_testing) {
+          t1 = TickTime::Now();
+          TickInterval tick_diff = t1 - t0;
+          acc_ticks += tick_diff;
+          if (tick_diff.Microseconds() > max_time_us) {
+            max_time_us = tick_diff.Microseconds();
+          }
+          if (tick_diff.Microseconds() < min_time_us) {
+            min_time_us = tick_diff.Microseconds();
+          }
+        }
+
+        size = samples_per_channel * near_frame._audioChannel;
+        ASSERT_EQ(size, fwrite(near_frame._payloadData,
+                               sizeof(int16_t),
+                               size,
+                               out_file));
+      }
+      else {
+        FAIL() << "Event " << event << " is unrecognized";
+      }
+    }
+  }
+  printf("100%% complete\r");
+
+  if (aecm_echo_path_out_file != NULL) {
+    const size_t path_size =
+        apm->echo_control_mobile()->echo_path_size_bytes();
+    scoped_array<char> echo_path(new char[path_size]);
+    apm->echo_control_mobile()->GetEchoPath(echo_path.get(), path_size);
+    ASSERT_EQ(path_size, fwrite(echo_path.get(),
+                                sizeof(char),
+                                path_size,
+                                aecm_echo_path_out_file));
+    fclose(aecm_echo_path_out_file);
+    aecm_echo_path_out_file = NULL;
+  }
+
+  if (verbose) {
+    printf("\nProcessed frames: %d (primary), %d (reverse)\n",
+        primary_count, reverse_count);
+
+    if (apm->level_estimator()->is_enabled()) {
+      printf("\n--Level metrics--\n");
+      printf("RMS: %d dBFS\n", -apm->level_estimator()->RMS());
+    }
+    if (apm->echo_cancellation()->are_metrics_enabled()) {
+      EchoCancellation::Metrics metrics;
+      apm->echo_cancellation()->GetMetrics(&metrics);
+      printf("\n--Echo metrics--\n");
+      printf("(avg, max, min)\n");
+      printf("ERL:  ");
+      PrintStat(metrics.echo_return_loss);
+      printf("ERLE: ");
+      PrintStat(metrics.echo_return_loss_enhancement);
+      printf("ANLP: ");
+      PrintStat(metrics.a_nlp);
+    }
+    if (apm->echo_cancellation()->is_delay_logging_enabled()) {
+      int median = 0;
+      int std = 0;
+      apm->echo_cancellation()->GetDelayMetrics(&median, &std);
+      printf("\n--Delay metrics--\n");
+      printf("Median:             %3d\n", median);
+      printf("Standard deviation: %3d\n", std);
+    }
+  }
+
+  if (!pb_file) {
+    int8_t temp_int8;
+    if (far_file) {
+      read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
+      EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
+    }
+
+    read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
+    EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
+
+    if (!simulating) {
+      read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
+      EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
+      read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
+      EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
+      read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
+      EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
+    }
+  }
+
+  if (perf_testing) {
+    if (primary_count > 0) {
+      WebRtc_Word64 exec_time = acc_ticks.Milliseconds();
+      printf("\nTotal time: %.3f s, file time: %.2f s\n",
+        exec_time * 0.001, primary_count * 0.01);
+      printf("Time per frame: %.3f ms (average), %.3f ms (max),"
+             " %.3f ms (min)\n",
+          (exec_time * 1.0) / primary_count,
+          (max_time_us + max_time_reverse_us) / 1000.0,
+          (min_time_us + min_time_reverse_us) / 1000.0);
+    } else {
+      printf("Warning: no capture frames\n");
+    }
+  }
+
+  AudioProcessing::Destroy(apm);
+  apm = NULL;
+}
+}  // namespace
+
+int main(int argc, char* argv[])
+{
+  void_main(argc, argv);
+
+  // Optional, but removes memory leak noise from Valgrind.
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
diff --git a/src/modules/audio_processing/test/unit_test.cc b/src/modules/audio_processing/test/unit_test.cc
new file mode 100644
index 0000000..6fe5905
--- /dev/null
+++ b/src/modules/audio_processing/test/unit_test.cc
@@ -0,0 +1,1256 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+#include "audio_processing.h"
+#include "event_wrapper.h"
+#include "module_common_types.h"
+#include "scoped_ptr.h"
+#include "signal_processing_library.h"
+#include "testsupport/fileutils.h"
+#include "thread_wrapper.h"
+#include "trace.h"
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/test/unittest.pb.h"
+#else
+#include "webrtc/audio_processing/unittest.pb.h"
+#endif
+
+using webrtc::AudioProcessing;
+using webrtc::AudioFrame;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+using webrtc::EchoCancellation;
+using webrtc::EventWrapper;
+using webrtc::scoped_array;
+using webrtc::Trace;
+using webrtc::LevelEstimator;
+using webrtc::EchoCancellation;
+using webrtc::EchoControlMobile;
+using webrtc::VoiceDetection;
+
+namespace {
+// When false, this will compare the output data with the results stored to
+// file. This is the typical case. When the file should be updated, it can
+// be set to true with the command-line switch --write_output_data.
+bool write_output_data = false;
+
+class ApmTest : public ::testing::Test {
+ protected:
+  ApmTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  static void SetUpTestCase() {
+    Trace::CreateTrace();
+    std::string trace_filename = webrtc::test::OutputPath() +
+      "audioproc_trace.txt";
+    ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
+  }
+
+  static void TearDownTestCase() {
+    Trace::ReturnTrace();
+  }
+  // Path to where the resource files to be used for this test are located.
+  const std::string resource_path;
+  const std::string output_filename;
+  webrtc::AudioProcessing* apm_;
+  webrtc::AudioFrame* frame_;
+  webrtc::AudioFrame* revframe_;
+  FILE* far_file_;
+  FILE* near_file_;
+};
+
+ApmTest::ApmTest()
+    : resource_path(webrtc::test::ProjectRootPath() +
+                    "test/data/audio_processing/"),
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+      output_filename(resource_path + "output_data_fixed.pb"),
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+      output_filename(resource_path + "output_data_float.pb"),
+#endif
+      apm_(NULL),
+      frame_(NULL),
+      revframe_(NULL),
+      far_file_(NULL),
+      near_file_(NULL) {}
+
+void ApmTest::SetUp() {
+  apm_ = AudioProcessing::Create(0);
+  ASSERT_TRUE(apm_ != NULL);
+
+  frame_ = new AudioFrame();
+  revframe_ = new AudioFrame();
+
+  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+  ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+  frame_->_payloadDataLengthInSamples = 320;
+  frame_->_audioChannel = 2;
+  frame_->_frequencyInHz = 32000;
+  revframe_->_payloadDataLengthInSamples = 320;
+  revframe_->_audioChannel = 2;
+  revframe_->_frequencyInHz = 32000;
+
+  std::string input_filename = resource_path + "aec_far.pcm";
+  far_file_ = fopen(input_filename.c_str(), "rb");
+  ASSERT_TRUE(far_file_ != NULL) << "Could not open input file " <<
+      input_filename << "\n";
+  input_filename = resource_path + "aec_near.pcm";
+  near_file_ = fopen(input_filename.c_str(), "rb");
+  ASSERT_TRUE(near_file_ != NULL) << "Could not open input file " <<
+        input_filename << "\n";
+}
+
+void ApmTest::TearDown() {
+  if (frame_) {
+    delete frame_;
+  }
+  frame_ = NULL;
+
+  if (revframe_) {
+    delete revframe_;
+  }
+  revframe_ = NULL;
+
+  if (far_file_) {
+    ASSERT_EQ(0, fclose(far_file_));
+  }
+  far_file_ = NULL;
+
+  if (near_file_) {
+    ASSERT_EQ(0, fclose(near_file_));
+  }
+  near_file_ = NULL;
+
+  if (apm_ != NULL) {
+    AudioProcessing::Destroy(apm_);
+  }
+  apm_ = NULL;
+}
+
+void MixStereoToMono(const int16_t* stereo,
+                     int16_t* mono,
+                     int samples_per_channel) {
+  for (int i = 0; i < samples_per_channel; i++) {
+    int32_t int32 = (static_cast<int32_t>(stereo[i * 2]) +
+                     static_cast<int32_t>(stereo[i * 2 + 1])) >> 1;
+    mono[i] = static_cast<int16_t>(int32);
+  }
+}
+
+template <class T>
+T MaxValue(T a, T b) {
+  return a > b ? a : b;
+}
+
+template <class T>
+T AbsValue(T a) {
+  return a > 0 ? a : -a;
+}
+
+void SetFrameTo(AudioFrame* frame, int16_t value) {
+  for (int i = 0; i < frame->_payloadDataLengthInSamples * frame->_audioChannel;
+      ++i) {
+    frame->_payloadData[i] = value;
+  }
+}
+
+int16_t MaxAudioFrame(const AudioFrame& frame) {
+  const int length = frame._payloadDataLengthInSamples * frame._audioChannel;
+  int16_t max = AbsValue(frame._payloadData[0]);
+  for (int i = 1; i < length; i++) {
+    max = MaxValue(max, AbsValue(frame._payloadData[i]));
+  }
+
+  return max;
+}
+
+bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+  if (frame1._payloadDataLengthInSamples !=
+      frame2._payloadDataLengthInSamples) {
+    return false;
+  }
+  if (frame1._audioChannel !=
+      frame2._audioChannel) {
+    return false;
+  }
+  if (memcmp(frame1._payloadData, frame2._payloadData,
+             frame1._payloadDataLengthInSamples * frame1._audioChannel *
+               sizeof(int16_t))) {
+    return false;
+  }
+  return true;
+}
+
+void TestStats(const AudioProcessing::Statistic& test,
+               const webrtc::audioproc::Test::Statistic& reference) {
+  EXPECT_EQ(reference.instant(), test.instant);
+  EXPECT_EQ(reference.average(), test.average);
+  EXPECT_EQ(reference.maximum(), test.maximum);
+  EXPECT_EQ(reference.minimum(), test.minimum);
+}
+
+void WriteStatsMessage(const AudioProcessing::Statistic& output,
+                       webrtc::audioproc::Test::Statistic* message) {
+  message->set_instant(output.instant);
+  message->set_average(output.average);
+  message->set_maximum(output.maximum);
+  message->set_minimum(output.minimum);
+}
+
+void WriteMessageLiteToFile(const std::string filename,
+                            const ::google::protobuf::MessageLite& message) {
+  FILE* file = fopen(filename.c_str(), "wb");
+  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+  int size = message.ByteSize();
+  ASSERT_GT(size, 0);
+  unsigned char* array = new unsigned char[size];
+  ASSERT_TRUE(message.SerializeToArray(array, size));
+
+  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
+  ASSERT_EQ(static_cast<size_t>(size),
+      fwrite(array, sizeof(unsigned char), size, file));
+
+  delete [] array;
+  fclose(file);
+}
+
+void ReadMessageLiteFromFile(const std::string filename,
+                             ::google::protobuf::MessageLite* message) {
+  assert(message != NULL);
+
+  FILE* file = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+  int size = 0;
+  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
+  ASSERT_GT(size, 0);
+  unsigned char* array = new unsigned char[size];
+  ASSERT_EQ(static_cast<size_t>(size),
+      fread(array, sizeof(unsigned char), size, file));
+
+  ASSERT_TRUE(message->ParseFromArray(array, size));
+
+  delete [] array;
+  fclose(file);
+}
+
+struct ThreadData {
+  ThreadData(int thread_num_, AudioProcessing* ap_)
+      : thread_num(thread_num_),
+        error(false),
+        ap(ap_) {}
+  int thread_num;
+  bool error;
+  AudioProcessing* ap;
+};
+
+// Don't use GTest here; non-thread-safe on Windows (as of 1.5.0).
+bool DeadlockProc(void* thread_object) {
+  ThreadData* thread_data = static_cast<ThreadData*>(thread_object);
+  AudioProcessing* ap = thread_data->ap;
+  int err = ap->kNoError;
+
+  AudioFrame primary_frame;
+  AudioFrame reverse_frame;
+  primary_frame._payloadDataLengthInSamples = 320;
+  primary_frame._audioChannel = 2;
+  primary_frame._frequencyInHz = 32000;
+  reverse_frame._payloadDataLengthInSamples = 320;
+  reverse_frame._audioChannel = 2;
+  reverse_frame._frequencyInHz = 32000;
+
+  ap->echo_cancellation()->Enable(true);
+  ap->gain_control()->Enable(true);
+  ap->high_pass_filter()->Enable(true);
+  ap->level_estimator()->Enable(true);
+  ap->noise_suppression()->Enable(true);
+  ap->voice_detection()->Enable(true);
+
+  if (thread_data->thread_num % 2 == 0) {
+    err = ap->AnalyzeReverseStream(&reverse_frame);
+    if (err != ap->kNoError) {
+      printf("Error in AnalyzeReverseStream(): %d\n", err);
+      thread_data->error = true;
+      return false;
+    }
+  }
+
+  if (thread_data->thread_num % 2 == 1) {
+    ap->set_stream_delay_ms(0);
+    ap->echo_cancellation()->set_stream_drift_samples(0);
+    ap->gain_control()->set_stream_analog_level(0);
+    err = ap->ProcessStream(&primary_frame);
+    if (err == ap->kStreamParameterNotSetError) {
+      printf("Expected kStreamParameterNotSetError in ProcessStream(): %d\n",
+          err);
+    } else if (err != ap->kNoError) {
+      printf("Error in ProcessStream(): %d\n", err);
+      thread_data->error = true;
+      return false;
+    }
+    ap->gain_control()->stream_analog_level();
+  }
+
+  EventWrapper* event = EventWrapper::Create();
+  event->Wait(1);
+  delete event;
+  event = NULL;
+
+  return true;
+}
+
+/*TEST_F(ApmTest, Deadlock) {
+  const int num_threads = 16;
+  std::vector<ThreadWrapper*> threads(num_threads);
+  std::vector<ThreadData*> thread_data(num_threads);
+
+  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+  ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+  for (int i = 0; i < num_threads; i++) {
+    thread_data[i] = new ThreadData(i, apm_);
+    threads[i] = ThreadWrapper::CreateThread(DeadlockProc,
+                                             thread_data[i],
+                                             kNormalPriority,
+                                             0);
+    ASSERT_TRUE(threads[i] != NULL);
+    unsigned int thread_id = 0;
+    threads[i]->Start(thread_id);
+  }
+
+  EventWrapper* event = EventWrapper::Create();
+  ASSERT_EQ(kEventTimeout, event->Wait(5000));
+  delete event;
+  event = NULL;
+
+  for (int i = 0; i < num_threads; i++) {
+    // This will return false if the thread has deadlocked.
+    ASSERT_TRUE(threads[i]->Stop());
+    ASSERT_FALSE(thread_data[i]->error);
+    delete threads[i];
+    threads[i] = NULL;
+    delete thread_data[i];
+    thread_data[i] = NULL;
+  }
+}*/
+
+TEST_F(ApmTest, StreamParameters) {
+  // No errors when the components are disabled.
+  EXPECT_EQ(apm_->kNoError,
+            apm_->ProcessStream(frame_));
+
+  // -- Missing AGC level --
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(false));
+
+  // -- Missing delay --
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+
+  // -- Missing drift --
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+  // -- No stream parameters --
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->AnalyzeReverseStream(revframe_));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            apm_->ProcessStream(frame_));
+
+  // -- All there --
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+}
+
+TEST_F(ApmTest, Channels) {
+  // Testing number of invalid channels
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
+  // Testing number of valid channels
+  for (int i = 1; i < 3; i++) {
+    for (int j = 1; j < 3; j++) {
+      if (j > i) {
+        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
+      } else {
+        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
+        EXPECT_EQ(j, apm_->num_output_channels());
+      }
+    }
+    EXPECT_EQ(i, apm_->num_input_channels());
+    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
+    EXPECT_EQ(i, apm_->num_reverse_channels());
+  }
+}
+
+TEST_F(ApmTest, SampleRates) {
+  // Testing invalid sample rates
+  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
+  // Testing valid sample rates
+  int fs[] = {8000, 16000, 32000};
+  for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
+    EXPECT_EQ(fs[i], apm_->sample_rate_hz());
+  }
+}
+
+
+TEST_F(ApmTest, EchoCancellation) {
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_cancellation()->set_device_sample_rate_hz(100000));
+
+  int rate[] = {16000, 44100, 48000};
+  for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
+    EXPECT_EQ(rate[i],
+        apm_->echo_cancellation()->device_sample_rate_hz());
+  }
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_cancellation()->set_suppression_level(
+          static_cast<EchoCancellation::SuppressionLevel>(-1)));
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_cancellation()->set_suppression_level(
+          static_cast<EchoCancellation::SuppressionLevel>(4)));
+
+  EchoCancellation::SuppressionLevel level[] = {
+    EchoCancellation::kLowSuppression,
+    EchoCancellation::kModerateSuppression,
+    EchoCancellation::kHighSuppression,
+  };
+  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->echo_cancellation()->set_suppression_level(level[i]));
+    EXPECT_EQ(level[i],
+        apm_->echo_cancellation()->suppression_level());
+  }
+
+  EchoCancellation::Metrics metrics;
+  EXPECT_EQ(apm_->kNotEnabledError,
+            apm_->echo_cancellation()->GetMetrics(&metrics));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
+
+  int median = 0;
+  int std = 0;
+  EXPECT_EQ(apm_->kNotEnabledError,
+            apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());
+
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+}
+
+TEST_F(ApmTest, EchoControlMobile) {
+  // AECM won't use super-wideband.
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  EXPECT_EQ(apm_->kBadSampleRateError, apm_->echo_control_mobile()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  // Turn AECM on (and AEC off)
+  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+  EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_control_mobile()->set_routing_mode(
+      static_cast<EchoControlMobile::RoutingMode>(-1)));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->echo_control_mobile()->set_routing_mode(
+      static_cast<EchoControlMobile::RoutingMode>(5)));
+
+  // Toggle routing modes
+  EchoControlMobile::RoutingMode mode[] = {
+      EchoControlMobile::kQuietEarpieceOrHeadset,
+      EchoControlMobile::kEarpiece,
+      EchoControlMobile::kLoudEarpiece,
+      EchoControlMobile::kSpeakerphone,
+      EchoControlMobile::kLoudSpeakerphone,
+  };
+  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->echo_control_mobile()->set_routing_mode(mode[i]));
+    EXPECT_EQ(mode[i],
+        apm_->echo_control_mobile()->routing_mode());
+  }
+  // Turn comfort noise off/on
+  EXPECT_EQ(apm_->kNoError,
+      apm_->echo_control_mobile()->enable_comfort_noise(false));
+  EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+  EXPECT_EQ(apm_->kNoError,
+      apm_->echo_control_mobile()->enable_comfort_noise(true));
+  EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+  // Set and get echo path
+  const size_t echo_path_size =
+      apm_->echo_control_mobile()->echo_path_size_bytes();
+  scoped_array<char> echo_path_in(new char[echo_path_size]);
+  scoped_array<char> echo_path_out(new char[echo_path_size]);
+  EXPECT_EQ(apm_->kNullPointerError,
+            apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
+  EXPECT_EQ(apm_->kNullPointerError,
+            apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
+  EXPECT_EQ(apm_->kBadParameterError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+                                                     echo_path_size));
+  for (size_t i = 0; i < echo_path_size; i++) {
+    echo_path_in[i] = echo_path_out[i] + 1;
+  }
+  EXPECT_EQ(apm_->kBadParameterError,
+            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
+                                                     echo_path_size));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+                                                     echo_path_size));
+  for (size_t i = 0; i < echo_path_size; i++) {
+    EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
+  }
+  // Turn AECM off
+  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
+}
+
+TEST_F(ApmTest, GainControl) {
+  // Testing gain modes
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(-1)));
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(3)));
+
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_mode(
+      apm_->gain_control()->mode()));
+
+  GainControl::Mode mode[] = {
+    GainControl::kAdaptiveAnalog,
+    GainControl::kAdaptiveDigital,
+    GainControl::kFixedDigital
+  };
+  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_mode(mode[i]));
+    EXPECT_EQ(mode[i], apm_->gain_control()->mode());
+  }
+  // Testing invalid target levels
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_target_level_dbfs(-3));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_target_level_dbfs(-40));
+  // Testing valid target levels
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_target_level_dbfs(
+      apm_->gain_control()->target_level_dbfs()));
+
+  int level_dbfs[] = {0, 6, 31};
+  for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
+    EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
+  }
+
+  // Testing invalid compression gains
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_compression_gain_db(-1));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_compression_gain_db(100));
+
+  // Testing valid compression gains
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_compression_gain_db(
+      apm_->gain_control()->compression_gain_db()));
+
+  int gain_db[] = {0, 10, 90};
+  for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_compression_gain_db(gain_db[i]));
+    EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
+  }
+
+  // Testing limiter off/on
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
+  EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
+  EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
+
+  // Testing invalid level limits
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(-1, 512));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(100000, 512));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, -1));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, 100000));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, 255));
+
+  // Testing valid level limits
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_analog_level_limits(
+      apm_->gain_control()->analog_level_minimum(),
+      apm_->gain_control()->analog_level_maximum()));
+
+  int min_level[] = {0, 255, 1024};
+  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
+    EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
+  }
+
+  int max_level[] = {0, 1024, 65535};
+  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
+    EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
+  }
+
+  // TODO(ajm): stream_is_saturated() and stream_analog_level()
+
+  // Turn AGC off
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+  EXPECT_FALSE(apm_->gain_control()->is_enabled());
+}
+
+TEST_F(ApmTest, NoiseSuppression) {
+  // Testing invalid suppression levels
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->noise_suppression()->set_level(
+          static_cast<NoiseSuppression::Level>(-1)));
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->noise_suppression()->set_level(
+          static_cast<NoiseSuppression::Level>(5)));
+
+  // Testing valid suppression levels
+  NoiseSuppression::Level level[] = {
+    NoiseSuppression::kLow,
+    NoiseSuppression::kModerate,
+    NoiseSuppression::kHigh,
+    NoiseSuppression::kVeryHigh
+  };
+  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->noise_suppression()->set_level(level[i]));
+    EXPECT_EQ(level[i], apm_->noise_suppression()->level());
+  }
+
+  // Turning NS on/off
+  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
+  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
+}
+
+TEST_F(ApmTest, HighPassFilter) {
+  // Turning HP filter on/off
+  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
+  EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
+  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
+}
+
+TEST_F(ApmTest, LevelEstimator) {
+  // Turning level estimator on/off
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+
+  EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());
+
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_TRUE(apm_->level_estimator()->is_enabled());
+
+  // Run this test in wideband; in super-wb, the splitting filter distorts the
+  // audio enough to cause deviation from the expectation for small values.
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  frame_->_payloadDataLengthInSamples = 160;
+  frame_->_audioChannel = 2;
+  frame_->_frequencyInHz = 16000;
+
+  // Min value if no frames have been processed.
+  EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+  // Min value on zero frames.
+  SetFrameTo(frame_, 0);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+  // Try a few RMS values.
+  // (These also test that the value resets after retrieving it.)
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(0, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 30000);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(1, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 10000);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(10, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 10);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(70, apm_->level_estimator()->RMS());
+
+  // Min value if _energy == 0.
+  SetFrameTo(frame_, 10000);
+  uint32_t energy = frame_->_energy; // Save default to restore below.
+  frame_->_energy = 0;
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(127, apm_->level_estimator()->RMS());
+  frame_->_energy = energy;
+
+  // Verify reset after enable/disable.
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  SetFrameTo(frame_, 1);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(90, apm_->level_estimator()->RMS());
+
+  // Verify reset after initialize.
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  SetFrameTo(frame_, 1);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(90, apm_->level_estimator()->RMS());
+}
+
+TEST_F(ApmTest, VoiceDetection) {
+  // Test external VAD
+  EXPECT_EQ(apm_->kNoError,
+            apm_->voice_detection()->set_stream_has_voice(true));
+  EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->voice_detection()->set_stream_has_voice(false));
+  EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
+
+  // Testing invalid likelihoods
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->voice_detection()->set_likelihood(
+          static_cast<VoiceDetection::Likelihood>(-1)));
+
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->voice_detection()->set_likelihood(
+          static_cast<VoiceDetection::Likelihood>(5)));
+
+  // Testing valid likelihoods
+  VoiceDetection::Likelihood likelihood[] = {
+      VoiceDetection::kVeryLowLikelihood,
+      VoiceDetection::kLowLikelihood,
+      VoiceDetection::kModerateLikelihood,
+      VoiceDetection::kHighLikelihood
+  };
+  for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+    EXPECT_EQ(apm_->kNoError,
+              apm_->voice_detection()->set_likelihood(likelihood[i]));
+    EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
+  }
+
+  /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
+  // Testing invalid frame sizes
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->voice_detection()->set_frame_size_ms(12));
+
+  // Testing valid frame sizes
+  for (int i = 10; i <= 30; i += 10) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->voice_detection()->set_frame_size_ms(i));
+    EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
+  }
+  */
+
+  // Turning VAD on/off
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_TRUE(apm_->voice_detection()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
+
+  // Test that AudioFrame activity is maintained when VAD is disabled.
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+  AudioFrame::VADActivity activity[] = {
+      AudioFrame::kVadActive,
+      AudioFrame::kVadPassive,
+      AudioFrame::kVadUnknown
+  };
+  for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+    frame_->_vadActivity = activity[i];
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    EXPECT_EQ(activity[i], frame_->_vadActivity);
+  }
+
+  // Test that AudioFrame activity is set when VAD is enabled.
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  frame_->_vadActivity = AudioFrame::kVadUnknown;
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_NE(AudioFrame::kVadUnknown, frame_->_vadActivity);
+
+  // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
+}
+
+TEST_F(ApmTest, SplittingFilter) {
+  // Verify the filter is not active through undistorted audio when:
+  // 1. No components are enabled...
+  SetFrameTo(frame_, 1000);
+  AudioFrame frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+  // 2. Only the level estimator is enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+
+  // 3. Only VAD is enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+  // 4. Both VAD and the level estimator are enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+  // 5. Not using super-wb.
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  frame_->_payloadDataLengthInSamples = 160;
+  frame_->_audioChannel = 2;
+  frame_->_frequencyInHz = 16000;
+  // Enable AEC, which would require the filter in super-wb. We rely on the
+  // first few frames of data being unaffected by the AEC.
+  // TODO(andrew): This test, and the one below, rely rather tenuously on the
+  // behavior of the AEC. Think of something more robust.
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  SetFrameTo(frame_, 1000);
+  frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+  // Check the test is valid. We should have distortion from the filter
+  // when AEC is enabled (which won't affect the audio).
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  frame_->_payloadDataLengthInSamples = 320;
+  frame_->_audioChannel = 2;
+  frame_->_frequencyInHz = 32000;
+  SetFrameTo(frame_, 1000);
+  frame_copy = *frame_;
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
+}
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDump) {
+  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
+  EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // Stopping without having started should be OK.
+  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+  EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
+  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+  // Verify the file has been written.
+  ASSERT_TRUE(fopen(filename.c_str(), "r") != NULL);
+  // Clean it up.
+  ASSERT_EQ(0, remove(filename.c_str()));
+#else
+  EXPECT_EQ(apm_->kUnsupportedFunctionError,
+            apm_->StartDebugRecording(filename.c_str()));
+  EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
+
+  // Verify the file has NOT been written.
+  ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+TEST_F(ApmTest, Process) {
+  GOOGLE_PROTOBUF_VERIFY_VERSION;
+  webrtc::audioproc::OutputData output_data;
+
+  if (!write_output_data) {
+    ReadMessageLiteFromFile(output_filename, &output_data);
+  } else {
+    // We don't have a file; add the required tests to the protobuf.
+    // TODO(ajm): vary the output channels as well?
+    const int channels[] = {1, 2};
+    const size_t channels_size = sizeof(channels) / sizeof(*channels);
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+    // AECM doesn't support super-wb.
+    const int sample_rates[] = {8000, 16000};
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+    const int sample_rates[] = {8000, 16000, 32000};
+#endif
+    const size_t sample_rates_size = sizeof(sample_rates) / sizeof(*sample_rates);
+    for (size_t i = 0; i < channels_size; i++) {
+      for (size_t j = 0; j < channels_size; j++) {
+        for (size_t k = 0; k < sample_rates_size; k++) {
+          webrtc::audioproc::Test* test = output_data.add_test();
+          test->set_num_reverse_channels(channels[i]);
+          test->set_num_input_channels(channels[j]);
+          test->set_num_output_channels(channels[j]);
+          test->set_sample_rate(sample_rates[k]);
+        }
+      }
+    }
+  }
+
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_analog_level_limits(0, 255));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+#endif
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->high_pass_filter()->Enable(true));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->level_estimator()->Enable(true));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->noise_suppression()->Enable(true));
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->voice_detection()->Enable(true));
+
+  for (int i = 0; i < output_data.test_size(); i++) {
+    printf("Running test %d of %d...\n", i + 1, output_data.test_size());
+
+    webrtc::audioproc::Test* test = output_data.mutable_test(i);
+    const int samples_per_channel = test->sample_rate() / 100;
+    revframe_->_payloadDataLengthInSamples = samples_per_channel;
+    revframe_->_audioChannel = test->num_reverse_channels();
+    revframe_->_frequencyInHz = test->sample_rate();
+    frame_->_payloadDataLengthInSamples = samples_per_channel;
+    frame_->_audioChannel = test->num_input_channels();
+    frame_->_frequencyInHz = test->sample_rate();
+
+    EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+    ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(test->sample_rate()));
+    ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(frame_->_audioChannel,
+                                                     frame_->_audioChannel));
+    ASSERT_EQ(apm_->kNoError,
+        apm_->set_num_reverse_channels(revframe_->_audioChannel));
+
+    int frame_count = 0;
+    int has_echo_count = 0;
+    int has_voice_count = 0;
+    int is_saturated_count = 0;
+    int analog_level = 127;
+    int analog_level_average = 0;
+    int max_output_average = 0;
+
+    while (1) {
+      // Read far-end frame
+      const size_t frame_size = samples_per_channel * 2;
+      size_t read_count = fread(revframe_->_payloadData,
+                                sizeof(int16_t),
+                                frame_size,
+                                far_file_);
+      if (read_count != frame_size) {
+        // Check that the file really ended.
+        ASSERT_NE(0, feof(far_file_));
+        break; // This is expected.
+      }
+
+      if (revframe_->_audioChannel == 1) {
+        MixStereoToMono(revframe_->_payloadData, revframe_->_payloadData,
+                        samples_per_channel);
+      }
+
+      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+
+      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+      EXPECT_EQ(apm_->kNoError,
+          apm_->echo_cancellation()->set_stream_drift_samples(0));
+      EXPECT_EQ(apm_->kNoError,
+          apm_->gain_control()->set_stream_analog_level(analog_level));
+
+      // Read near-end frame
+      read_count = fread(frame_->_payloadData,
+                         sizeof(int16_t),
+                         frame_size,
+                         near_file_);
+      if (read_count != frame_size) {
+        // Check that the file really ended.
+        ASSERT_NE(0, feof(near_file_));
+        break; // This is expected.
+      }
+
+      if (frame_->_audioChannel == 1) {
+        MixStereoToMono(frame_->_payloadData, frame_->_payloadData,
+                        samples_per_channel);
+      }
+      frame_->_vadActivity = AudioFrame::kVadUnknown;
+
+      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
+      max_output_average += MaxAudioFrame(*frame_);
+
+      if (apm_->echo_cancellation()->stream_has_echo()) {
+        has_echo_count++;
+      }
+
+      analog_level = apm_->gain_control()->stream_analog_level();
+      analog_level_average += analog_level;
+      if (apm_->gain_control()->stream_is_saturated()) {
+        is_saturated_count++;
+      }
+      if (apm_->voice_detection()->stream_has_voice()) {
+        has_voice_count++;
+        EXPECT_EQ(AudioFrame::kVadActive, frame_->_vadActivity);
+      } else {
+        EXPECT_EQ(AudioFrame::kVadPassive, frame_->_vadActivity);
+      }
+
+      frame_count++;
+    }
+    max_output_average /= frame_count;
+    analog_level_average /= frame_count;
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+    EchoCancellation::Metrics echo_metrics;
+    EXPECT_EQ(apm_->kNoError,
+              apm_->echo_cancellation()->GetMetrics(&echo_metrics));
+    int median = 0;
+    int std = 0;
+    EXPECT_EQ(apm_->kNoError,
+              apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
+
+    int rms_level = apm_->level_estimator()->RMS();
+    EXPECT_LE(0, rms_level);
+    EXPECT_GE(127, rms_level);
+#endif
+
+    if (!write_output_data) {
+      EXPECT_EQ(test->has_echo_count(), has_echo_count);
+      EXPECT_EQ(test->has_voice_count(), has_voice_count);
+      EXPECT_EQ(test->is_saturated_count(), is_saturated_count);
+
+      EXPECT_EQ(test->analog_level_average(), analog_level_average);
+      EXPECT_EQ(test->max_output_average(), max_output_average);
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+      webrtc::audioproc::Test::EchoMetrics reference =
+          test->echo_metrics();
+      TestStats(echo_metrics.residual_echo_return_loss,
+                reference.residual_echo_return_loss());
+      TestStats(echo_metrics.echo_return_loss,
+                reference.echo_return_loss());
+      TestStats(echo_metrics.echo_return_loss_enhancement,
+                reference.echo_return_loss_enhancement());
+      TestStats(echo_metrics.a_nlp,
+                reference.a_nlp());
+
+      webrtc::audioproc::Test::DelayMetrics reference_delay =
+          test->delay_metrics();
+      EXPECT_EQ(reference_delay.median(), median);
+      EXPECT_EQ(reference_delay.std(), std);
+
+      EXPECT_EQ(test->rms_level(), rms_level);
+#endif
+    } else {
+      test->set_has_echo_count(has_echo_count);
+      test->set_has_voice_count(has_voice_count);
+      test->set_is_saturated_count(is_saturated_count);
+
+      test->set_analog_level_average(analog_level_average);
+      test->set_max_output_average(max_output_average);
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+      webrtc::audioproc::Test::EchoMetrics* message =
+          test->mutable_echo_metrics();
+      WriteStatsMessage(echo_metrics.residual_echo_return_loss,
+                        message->mutable_residual_echo_return_loss());
+      WriteStatsMessage(echo_metrics.echo_return_loss,
+                        message->mutable_echo_return_loss());
+      WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
+                        message->mutable_echo_return_loss_enhancement());
+      WriteStatsMessage(echo_metrics.a_nlp,
+                        message->mutable_a_nlp());
+
+      webrtc::audioproc::Test::DelayMetrics* message_delay =
+          test->mutable_delay_metrics();
+      message_delay->set_median(median);
+      message_delay->set_std(std);
+
+      test->set_rms_level(rms_level);
+#endif
+    }
+
+    rewind(far_file_);
+    rewind(near_file_);
+  }
+
+  if (write_output_data) {
+    WriteMessageLiteToFile(output_filename, output_data);
+  }
+}
+}  // namespace
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+
+  for (int i = 1; i < argc; i++) {
+    if (strcmp(argv[i], "--write_output_data") == 0) {
+      write_output_data = true;
+    }
+  }
+
+  int err = RUN_ALL_TESTS();
+
+  // Optional, but removes memory leak noise from Valgrind.
+  google::protobuf::ShutdownProtobufLibrary();
+  return err;
+}
diff --git a/src/modules/audio_processing/test/unittest.proto b/src/modules/audio_processing/test/unittest.proto
new file mode 100644
index 0000000..67ba722
--- /dev/null
+++ b/src/modules/audio_processing/test/unittest.proto
@@ -0,0 +1,52 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+message Test {
+  optional int32 num_reverse_channels = 1;
+  optional int32 num_input_channels = 2;
+  optional int32 num_output_channels = 3;
+  optional int32 sample_rate = 4;
+
+  message Frame {
+  }
+
+  repeated Frame frame = 5;
+
+  optional int32 analog_level_average = 6;
+  optional int32 max_output_average = 7;
+
+  optional int32 has_echo_count = 8;
+  optional int32 has_voice_count = 9;
+  optional int32 is_saturated_count = 10;
+
+  message Statistic {
+    optional int32 instant = 1;
+    optional int32 average = 2;
+    optional int32 maximum = 3;
+    optional int32 minimum = 4;
+  }
+
+  message EchoMetrics {
+    optional Statistic residual_echo_return_loss = 1;
+    optional Statistic echo_return_loss = 2;
+    optional Statistic echo_return_loss_enhancement = 3;
+    optional Statistic a_nlp = 4;
+  }
+
+  optional EchoMetrics echo_metrics = 11;
+
+  message DelayMetrics {
+    optional int32 median = 1;
+    optional int32 std = 2;
+  }
+
+  optional DelayMetrics delay_metrics = 12;
+
+  optional int32 rms_level = 13;
+}
+
+message OutputData {
+  repeated Test test = 1;
+}
+
diff --git a/src/modules/audio_processing/test/unpack.cc b/src/modules/audio_processing/test/unpack.cc
new file mode 100644
index 0000000..2337131
--- /dev/null
+++ b/src/modules/audio_processing/test/unpack.cc
@@ -0,0 +1,216 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Commandline tool to unpack audioproc debug files.
+//
+// The debug files are dumped as protobuf blobs. For analysis, it's necessary
+// to unpack the file into its component parts: audio and other data.
+
+#include <stdio.h>
+
+#include "google/gflags.h"
+#include "scoped_ptr.h"
+#include "typedefs.h"
+#include "webrtc/audio_processing/debug.pb.h"
+
+using webrtc::scoped_array;
+
+using webrtc::audioproc::Event;
+using webrtc::audioproc::ReverseStream;
+using webrtc::audioproc::Stream;
+using webrtc::audioproc::Init;
+
+// TODO(andrew): unpack more of the data.
+DEFINE_string(input_file, "input.pcm", "The name of the input stream file.");
+DEFINE_string(output_file, "ref_out.pcm",
+              "The name of the reference output stream file.");
+DEFINE_string(reverse_file, "reverse.pcm",
+              "The name of the reverse input stream file.");
+DEFINE_string(delay_file, "delay.int32", "The name of the delay file.");
+DEFINE_string(drift_file, "drift.int32", "The name of the drift file.");
+DEFINE_string(level_file, "level.int32", "The name of the level file.");
+DEFINE_string(settings_file, "settings.txt", "The name of the settings file.");
+DEFINE_bool(full, false,
+            "Unpack the full set of files (normally not needed).");
+
+// TODO(andrew): move this to a helper class to share with process_test.cc?
+// Returns true on success, false on error or end-of-file.
+bool ReadMessageFromFile(FILE* file,
+                        ::google::protobuf::MessageLite* msg) {
+  // The "wire format" for the size is little-endian.
+  // Assume process_test is running on a little-endian machine.
+  int32_t size = 0;
+  if (fread(&size, sizeof(int32_t), 1, file) != 1) {
+    return false;
+  }
+  if (size <= 0) {
+    return false;
+  }
+  const size_t usize = static_cast<size_t>(size);
+
+  scoped_array<char> array(new char[usize]);
+  if (fread(array.get(), sizeof(char), usize, file) != usize) {
+    return false;
+  }
+
+  msg->Clear();
+  return msg->ParseFromArray(array.get(), usize);
+}
+
+int main(int argc, char* argv[]) {
+  std::string program_name = argv[0];
+  std::string usage = "Commandline tool to unpack audioproc debug files.\n"
+    "Example usage:\n" + program_name + " debug_dump.pb\n";
+  google::SetUsageMessage(usage);
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (argc < 2) {
+    printf("%s", google::ProgramUsage());
+    return 1;
+  }
+
+  FILE* debug_file = fopen(argv[1], "rb");
+  if (debug_file == NULL) {
+    printf("Unable to open %s\n", argv[1]);
+    return 1;
+  }
+  FILE* input_file = fopen(FLAGS_input_file.c_str(), "wb");
+  if (input_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_input_file.c_str());
+    return 1;
+  }
+  FILE* output_file = fopen(FLAGS_output_file.c_str(), "wb");
+  if (output_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_output_file.c_str());
+    return 1;
+  }
+  FILE* reverse_file = fopen(FLAGS_reverse_file.c_str(), "wb");
+  if (reverse_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_reverse_file.c_str());
+    return 1;
+  }
+  FILE* settings_file = fopen(FLAGS_settings_file.c_str(), "wb");
+  if (settings_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_settings_file.c_str());
+    return 1;
+  }
+
+  FILE* delay_file = NULL;
+  FILE* drift_file = NULL;
+  FILE* level_file = NULL;
+  if (FLAGS_full) {
+    delay_file = fopen(FLAGS_delay_file.c_str(), "wb");
+    if (delay_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_delay_file.c_str());
+      return 1;
+    }
+    drift_file = fopen(FLAGS_drift_file.c_str(), "wb");
+    if (drift_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_drift_file.c_str());
+      return 1;
+    }
+    level_file = fopen(FLAGS_level_file.c_str(), "wb");
+    if (level_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_level_file.c_str());
+      return 1;
+    }
+  }
+
+  Event event_msg;
+  int frame_count = 0;
+  while (ReadMessageFromFile(debug_file, &event_msg)) {
+    if (event_msg.type() == Event::REVERSE_STREAM) {
+      if (!event_msg.has_reverse_stream()) {
+        printf("Corrupted input file: ReverseStream missing.\n");
+        return 1;
+      }
+
+      const ReverseStream msg = event_msg.reverse_stream();
+      if (msg.has_data()) {
+        if (fwrite(msg.data().data(), msg.data().size(), 1, reverse_file) !=
+            1) {
+          printf("Error when writing to %s\n", FLAGS_reverse_file.c_str());
+          return 1;
+        }
+      }
+    } else if (event_msg.type() == Event::STREAM) {
+      frame_count++;
+      if (!event_msg.has_stream()) {
+        printf("Corrupted input file: Stream missing.\n");
+        return 1;
+      }
+
+      const Stream msg = event_msg.stream();
+      if (msg.has_input_data()) {
+        if (fwrite(msg.input_data().data(), msg.input_data().size(), 1,
+                   input_file) != 1) {
+          printf("Error when writing to %s\n", FLAGS_input_file.c_str());
+          return 1;
+        }
+      }
+
+      if (msg.has_output_data()) {
+        if (fwrite(msg.output_data().data(), msg.output_data().size(), 1,
+                   output_file) != 1) {
+          printf("Error when writing to %s\n", FLAGS_output_file.c_str());
+          return 1;
+        }
+      }
+
+      if (FLAGS_full) {
+        if (msg.has_delay()) {
+          int32_t delay = msg.delay();
+          if (fwrite(&delay, sizeof(int32_t), 1, delay_file) != 1) {
+            printf("Error when writing to %s\n", FLAGS_delay_file.c_str());
+            return 1;
+          }
+        }
+
+        if (msg.has_drift()) {
+          int32_t drift = msg.drift();
+          if (fwrite(&drift, sizeof(int32_t), 1, drift_file) != 1) {
+            printf("Error when writing to %s\n", FLAGS_drift_file.c_str());
+            return 1;
+          }
+        }
+
+        if (msg.has_level()) {
+          int32_t level = msg.level();
+          if (fwrite(&level, sizeof(int32_t), 1, level_file) != 1) {
+            printf("Error when writing to %s\n", FLAGS_level_file.c_str());
+            return 1;
+          }
+        }
+      }
+    } else if (event_msg.type() == Event::INIT) {
+      if (!event_msg.has_init()) {
+        printf("Corrupted input file: Init missing.\n");
+        return 1;
+      }
+
+      const Init msg = event_msg.init();
+      // These should print out zeros if they're missing.
+      fprintf(settings_file, "Init at frame: %d\n", frame_count);
+      fprintf(settings_file, "  Sample rate: %d\n", msg.sample_rate());
+      fprintf(settings_file, "  Device sample rate: %d\n",
+              msg.device_sample_rate());
+      fprintf(settings_file, "  Input channels: %d\n",
+              msg.num_input_channels());
+      fprintf(settings_file, "  Output channels: %d\n",
+              msg.num_output_channels());
+      fprintf(settings_file, "  Reverse channels: %d\n",
+              msg.num_reverse_channels());
+
+      fprintf(settings_file, "\n");
+    }
+  }
+
+  return 0;
+}
diff --git a/src/modules/audio_processing/utility/Android.mk b/src/modules/audio_processing/utility/Android.mk
index 7e758ce..bd3d039 100644
--- a/src/modules/audio_processing/utility/Android.mk
+++ b/src/modules/audio_processing/utility/Android.mk
@@ -10,40 +10,34 @@
 
 include $(CLEAR_VARS)
 
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
 LOCAL_ARM_MODE := arm
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := libwebrtc_apm_utility
 LOCAL_MODULE_TAGS := optional
-LOCAL_GENERATED_SOURCES :=
-LOCAL_SRC_FILES := fft4g.c \
-    ring_buffer.c
+LOCAL_SRC_FILES := \
+    fft4g.c \
+    ring_buffer.c \
+    delay_estimator.c \
+    delay_estimator_wrapper.c
 
 # Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_THREAD_RR' \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
 
 # Include paths placed before CFLAGS/CPPFLAGS
 LOCAL_C_INCLUDES := \
-    $(LOCAL_PATH)
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include
 
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
     libdl \
     libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
 
+ifndef NDK_ROOT
 include external/stlport/libstlport.mk
+endif
 include $(BUILD_STATIC_LIBRARY)
diff --git a/src/modules/audio_processing/utility/delay_estimator.c b/src/modules/audio_processing/utility/delay_estimator.c
new file mode 100644
index 0000000..24ee74d
--- /dev/null
+++ b/src/modules/audio_processing/utility/delay_estimator.c
@@ -0,0 +1,319 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "delay_estimator.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+// The number of right shifts for scaling depends linearly on the number of
+// bits in the far-end binary spectrum.
+static const int kShiftsAtZero = 13;  // Right shifts at zero binary spectrum.
+static const int kShiftsLinearSlope = 3;
+
+static const int32_t kProbabilityOffset = 1024;  // 2 in Q9.
+static const int32_t kProbabilityLowerLimit = 8704;  // 17 in Q9.
+static const int32_t kProbabilityMinSpread = 2816;  // 5.5 in Q9.
+
+// Counts and returns number of bits of a 32-bit word.
+static int BitCount(uint32_t u32) {
+  uint32_t tmp = u32 - ((u32 >> 1) & 033333333333) -
+      ((u32 >> 2) & 011111111111);
+  tmp = ((tmp + (tmp >> 3)) & 030707070707);
+  tmp = (tmp + (tmp >> 6));
+  tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077;
+
+  return ((int) tmp);
+}
+
+// Compares the |binary_vector| with all rows of the |binary_matrix| and counts
+// per row the number of times they have the same value.
+//
+// Inputs:
+//      - binary_vector     : binary "vector" stored in a long
+//      - binary_matrix     : binary "matrix" stored as a vector of long
+//      - matrix_size       : size of binary "matrix"
+//
+// Output:
+//      - bit_counts        : "Vector" stored as a long, containing for each
+//                            row the number of times the matrix row and the
+//                            input vector have the same value
+//
+static void BitCountComparison(uint32_t binary_vector,
+                               const uint32_t* binary_matrix,
+                               int matrix_size,
+                               int32_t* bit_counts) {
+  int n = 0;
+
+  // Compare |binary_vector| with all rows of the |binary_matrix|
+  for (; n < matrix_size; n++) {
+    bit_counts[n] = (int32_t) BitCount(binary_vector ^ binary_matrix[n]);
+  }
+}
+
+int WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* handle) {
+  assert(handle != NULL);
+
+  if (handle->mean_bit_counts != NULL) {
+    free(handle->mean_bit_counts);
+    handle->mean_bit_counts = NULL;
+  }
+  if (handle->bit_counts != NULL) {
+    free(handle->bit_counts);
+    handle->bit_counts = NULL;
+  }
+  if (handle->binary_far_history != NULL) {
+    free(handle->binary_far_history);
+    handle->binary_far_history = NULL;
+  }
+  if (handle->binary_near_history != NULL) {
+    free(handle->binary_near_history);
+    handle->binary_near_history = NULL;
+  }
+  if (handle->far_bit_counts != NULL) {
+    free(handle->far_bit_counts);
+    handle->far_bit_counts = NULL;
+  }
+
+  free(handle);
+
+  return 0;
+}
+
+int WebRtc_CreateBinaryDelayEstimator(BinaryDelayEstimator** handle,
+                                      int max_delay,
+                                      int lookahead) {
+  BinaryDelayEstimator* self = NULL;
+  int history_size = max_delay + lookahead;
+
+  if (handle == NULL) {
+    return -1;
+  }
+  if (max_delay < 0) {
+    return -1;
+  }
+  if (lookahead < 0) {
+    return -1;
+  }
+  if (history_size < 2) {
+    // Must be this large for buffer shifting.
+    return -1;
+  }
+
+  self = malloc(sizeof(BinaryDelayEstimator));
+  *handle = self;
+  if (self == NULL) {
+    return -1;
+  }
+
+  self->mean_bit_counts = NULL;
+  self->bit_counts = NULL;
+  self->binary_far_history = NULL;
+  self->far_bit_counts = NULL;
+
+  self->history_size = history_size;
+  self->near_history_size = lookahead + 1;
+
+  // Allocate memory for spectrum buffers.
+  self->mean_bit_counts = malloc(history_size * sizeof(int32_t));
+  if (self->mean_bit_counts == NULL) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  self->bit_counts = malloc(history_size * sizeof(int32_t));
+  if (self->bit_counts == NULL) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  // Allocate memory for history buffers.
+  self->binary_far_history = malloc(history_size * sizeof(uint32_t));
+  if (self->binary_far_history == NULL) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  self->binary_near_history = malloc(self->near_history_size *
+      sizeof(uint32_t));
+  if (self->binary_near_history == NULL) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  self->far_bit_counts = malloc(history_size * sizeof(int));
+  if (self->far_bit_counts == NULL) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+
+  return 0;
+}
+
+int WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* handle) {
+  int i = 0;
+  assert(handle != NULL);
+
+  memset(handle->bit_counts, 0, sizeof(int32_t) * handle->history_size);
+  memset(handle->binary_far_history, 0,
+         sizeof(uint32_t) * handle->history_size);
+  memset(handle->binary_near_history, 0,
+         sizeof(uint32_t) * handle->near_history_size);
+  memset(handle->far_bit_counts, 0, sizeof(int) * handle->history_size);
+  for (i = 0; i < handle->history_size; ++i) {
+    handle->mean_bit_counts[i] = (20 << 9);  // 20 in Q9.
+  }
+  handle->minimum_probability = (32 << 9);  // 32 in Q9.
+  handle->last_delay_probability = (32 << 9);  // 32 in Q9.
+
+  // Default return value if we're unable to estimate. -1 is used for errors.
+  handle->last_delay = -2;
+
+  return 0;
+}
+
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* handle,
+                                 uint32_t binary_far_spectrum,
+                                 uint32_t binary_near_spectrum) {
+  int i = 0;
+  int candidate_delay = -1;
+
+  int32_t value_best_candidate = 16384;  // 32 in Q9, (max |mean_bit_counts|).
+  int32_t value_worst_candidate = 0;
+
+  assert(handle != NULL);
+  // Shift binary spectrum history and insert current |binary_far_spectrum|.
+  memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]),
+          (handle->history_size - 1) * sizeof(uint32_t));
+  handle->binary_far_history[0] = binary_far_spectrum;
+
+  // Shift history of far-end binary spectrum bit counts and insert bit count
+  // of current |binary_far_spectrum|.
+  memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]),
+          (handle->history_size - 1) * sizeof(int));
+  handle->far_bit_counts[0] = BitCount(binary_far_spectrum);
+
+  if (handle->near_history_size > 1) {
+    // If we apply lookahead, shift near-end binary spectrum history. Insert
+    // current |binary_near_spectrum| and pull out the delayed one.
+    memmove(&(handle->binary_near_history[1]),
+            &(handle->binary_near_history[0]),
+            (handle->near_history_size - 1) * sizeof(uint32_t));
+    handle->binary_near_history[0] = binary_near_spectrum;
+    binary_near_spectrum =
+        handle->binary_near_history[handle->near_history_size - 1];
+  }
+
+  // Compare with delayed spectra and store the |bit_counts| for each delay.
+  BitCountComparison(binary_near_spectrum,
+                     handle->binary_far_history,
+                     handle->history_size,
+                     handle->bit_counts);
+
+  // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|.
+  for (i = 0; i < handle->history_size; i++) {
+    // |bit_counts| is constrained to [0, 32], meaning we can smooth with a
+    // factor up to 2^26. We use Q9.
+    int32_t bit_count = (handle->bit_counts[i] << 9);  // Q9.
+
+    // Update |mean_bit_counts| only when far-end signal has something to
+    // contribute. If |far_bit_counts| is zero the far-end signal is weak and
+    // we likely have a poor echo condition, hence don't update.
+    if (handle->far_bit_counts[i] > 0) {
+      // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|.
+      int shifts = kShiftsAtZero;
+      shifts -= (kShiftsLinearSlope * handle->far_bit_counts[i]) >> 4;
+      WebRtc_MeanEstimatorFix(bit_count, shifts, &(handle->mean_bit_counts[i]));
+    }
+  }
+
+  // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate|
+  // of |mean_bit_counts|.
+  for (i = 0; i < handle->history_size; i++) {
+    if (handle->mean_bit_counts[i] < value_best_candidate) {
+      value_best_candidate = handle->mean_bit_counts[i];
+      candidate_delay = i;
+    }
+    if (handle->mean_bit_counts[i] > value_worst_candidate) {
+      value_worst_candidate = handle->mean_bit_counts[i];
+    }
+  }
+
+  // The |value_best_candidate| is a good indicator on the probability of
+  // |candidate_delay| being an accurate delay (a small |value_best_candidate|
+  // means a good binary match). In the following sections we make a decision
+  // whether to update |last_delay| or not.
+  // 1) If the difference bit counts between the best and the worst delay
+  //    candidates is too small we consider the situation to be unreliable and
+  //    don't update |last_delay|.
+  // 2) If the situation is reliable we update |last_delay| if the value of the
+  //    best candidate delay has a value less than
+  //     i) an adaptive threshold |minimum_probability|, or
+  //    ii) this corresponding value |last_delay_probability|, but updated at
+  //        this time instant.
+
+  // Update |minimum_probability|.
+  if ((handle->minimum_probability > kProbabilityLowerLimit) &&
+      (value_worst_candidate - value_best_candidate > kProbabilityMinSpread)) {
+    // The "hard" threshold can't be lower than 17 (in Q9).
+    // The valley in the curve also has to be distinct, i.e., the
+    // difference between |value_worst_candidate| and |value_best_candidate| has
+    // to be large enough.
+    int32_t threshold = value_best_candidate + kProbabilityOffset;
+    if (threshold < kProbabilityLowerLimit) {
+      threshold = kProbabilityLowerLimit;
+    }
+    if (handle->minimum_probability > threshold) {
+      handle->minimum_probability = threshold;
+    }
+  }
+  // Update |last_delay_probability|.
+  // We use a Markov type model, i.e., a slowly increasing level over time.
+  handle->last_delay_probability++;
+  if (value_worst_candidate > value_best_candidate + kProbabilityOffset) {
+    // Reliable delay value for usage.
+    if (value_best_candidate < handle->minimum_probability) {
+      handle->last_delay = candidate_delay;
+    }
+    if (value_best_candidate < handle->last_delay_probability) {
+      handle->last_delay = candidate_delay;
+      // Reset |last_delay_probability|.
+      handle->last_delay_probability = value_best_candidate;
+    }
+  }
+
+  return handle->last_delay;
+}
+
+int WebRtc_binary_last_delay(BinaryDelayEstimator* handle) {
+  assert(handle != NULL);
+  return handle->last_delay;
+}
+
+int WebRtc_history_size(BinaryDelayEstimator* handle) {
+  assert(handle != NULL);
+  return handle->history_size;
+}
+
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+                             int factor,
+                             int32_t* mean_value) {
+  int32_t diff = new_value - *mean_value;
+
+  // mean_new = mean_value + ((new_value - mean_value) >> factor);
+  if (diff < 0) {
+    diff = -((-diff) >> factor);
+  } else {
+    diff = (diff >> factor);
+  }
+  *mean_value += diff;
+}
diff --git a/src/modules/audio_processing/utility/delay_estimator.h b/src/modules/audio_processing/utility/delay_estimator.h
new file mode 100644
index 0000000..a376dfe
--- /dev/null
+++ b/src/modules/audio_processing/utility/delay_estimator.h
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on binary converted spectra.
+// The return value is 0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+
+#include "typedefs.h"
+
+typedef struct {
+  // Pointer to bit counts.
+  int32_t* mean_bit_counts;
+  int* far_bit_counts;
+
+  // Array only used locally in ProcessBinarySpectrum() but whose size is
+  // determined at run-time.
+  int32_t* bit_counts;
+
+  // Binary history variables.
+  uint32_t* binary_far_history;
+  uint32_t* binary_near_history;
+
+  // Delay estimation variables.
+  int32_t minimum_probability;
+  int last_delay_probability;
+
+  // Delay memory.
+  int last_delay;
+
+  // Buffer size.
+  int history_size;
+
+  // Near-end buffer size.
+  int near_history_size;
+} BinaryDelayEstimator;
+
+// Releases the memory allocated by WebRtc_CreateBinaryDelayEstimator(...).
+// Input:
+//    - handle            : Pointer to the delay estimation instance.
+//
+int WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* handle);
+
+// Refer to WebRtc_CreateDelayEstimator() in delay_estimator_wrapper.h.
+int WebRtc_CreateBinaryDelayEstimator(BinaryDelayEstimator** handle,
+                                      int max_delay,
+                                      int lookahead);
+
+// Initializes the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+// Input:
+//    - handle            : Pointer to the delay estimation instance.
+//
+// Output:
+//    - handle            : Initialized instance.
+//
+int WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* handle);
+
+// Estimates and returns the delay between the binary far-end and binary near-
+// end spectra. The value will be offset by the lookahead (i.e. the lookahead
+// should be subtracted from the returned value).
+// Inputs:
+//    - handle                : Pointer to the delay estimation instance.
+//    - binary_far_spectrum   : Far-end binary spectrum.
+//    - binary_near_spectrum  : Near-end binary spectrum of the current block.
+//
+// Output:
+//    - handle                : Updated instance.
+//
+// Return value:
+//    - delay                 :  >= 0 - Calculated delay value.
+//                              -1    - Error.
+//                              -2    - Insufficient data for estimation.
+//
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* handle,
+                                 uint32_t binary_far_spectrum,
+                                 uint32_t binary_near_spectrum);
+
+// Returns the last calculated delay updated by the function
+// WebRtc_ProcessBinarySpectrum(...).
+//
+// Input:
+//    - handle                : Pointer to the delay estimation instance.
+//
+// Return value:
+//    - delay                 :  >= 0 - Last calculated delay value
+//                              -1    - Error
+//                              -2    - Insufficient data for estimation.
+//
+int WebRtc_binary_last_delay(BinaryDelayEstimator* handle);
+
+// Returns the history size used in the far-end buffers to calculate the delay
+// over.
+//
+// Input:
+//    - handle                : Pointer to the delay estimation instance.
+//
+// Return value:
+//    - history_size          :  > 0  - Far-end history size.
+//                              -1    - Error.
+//
+int WebRtc_history_size(BinaryDelayEstimator* handle);
+
+// Updates the |mean_value| recursively with a step size of 2^-|factor|. This
+// function is used internally in the Binary Delay Estimator as well as the
+// Fixed point wrapper.
+//
+// Inputs:
+//    - new_value             : The new value the mean should be updated with.
+//    - factor                : The step size, in number of right shifts.
+//
+// Input/Output:
+//    - mean_value            : Pointer to the mean value.
+//
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+                             int factor,
+                             int32_t* mean_value);
+
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
diff --git a/src/modules/audio_processing/utility/delay_estimator_wrapper.c b/src/modules/audio_processing/utility/delay_estimator_wrapper.c
new file mode 100644
index 0000000..438c95f
--- /dev/null
+++ b/src/modules/audio_processing/utility/delay_estimator_wrapper.c
@@ -0,0 +1,336 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "delay_estimator_wrapper.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "delay_estimator.h"
+
+typedef union {
+  float float_;
+  int32_t int32_;
+} SpectrumType;
+
+typedef struct {
+  // Pointers to mean values of spectrum.
+  SpectrumType* mean_far_spectrum;
+  SpectrumType* mean_near_spectrum;
+  // |mean_*_spectrum| initialization indicator.
+  int far_spectrum_initialized;
+  int near_spectrum_initialized;
+
+  int spectrum_size;
+
+  // Binary spectrum based delay estimator
+  BinaryDelayEstimator* binary_handle;
+} DelayEstimator;
+
+// Only bit |kBandFirst| through bit |kBandLast| are processed and
+// |kBandFirst| - |kBandLast| must be < 32.
+static const int kBandFirst = 12;
+static const int kBandLast = 43;
+
+static __inline uint32_t SetBit(uint32_t in, int pos) {
+  uint32_t mask = (1 << pos);
+  uint32_t out = (in | mask);
+
+  return out;
+}
+
+// Calculates the mean recursively. Same version as WebRtc_MeanEstimatorFix(),
+// but for float.
+//
+// Inputs:
+//    - new_value             : New additional value.
+//    - scale                 : Scale for smoothing (should be less than 1.0).
+//
+// Input/Output:
+//    - mean_value            : Pointer to the mean value for updating.
+//
+static void MeanEstimatorFloat(float new_value,
+                               float scale,
+                               float* mean_value) {
+  assert(scale < 1.0f);
+  *mean_value += (new_value - *mean_value) * scale;
+}
+
+// Computes the binary spectrum by comparing the input |spectrum| with a
+// |threshold_spectrum|. Float and fixed point versions.
+//
+// Inputs:
+//      - spectrum            : Spectrum of which the binary spectrum should be
+//                              calculated.
+//      - threshold_spectrum  : Threshold spectrum with which the input
+//                              spectrum is compared.
+// Return:
+//      - out                 : Binary spectrum.
+//
+static uint32_t BinarySpectrumFix(uint16_t* spectrum,
+                                  SpectrumType* threshold_spectrum,
+                                  int q_domain,
+                                  int* threshold_initialized) {
+  int i = kBandFirst;
+  uint32_t out = 0;
+
+  assert(q_domain < 16);
+
+  if (!(*threshold_initialized)) {
+    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // value. This speeds up the convergence.
+    for (i = kBandFirst; i <= kBandLast; i++) {
+      if (spectrum[i] > 0) {
+        // Convert input spectrum from Q(|q_domain|) to Q15.
+        int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain);
+        threshold_spectrum[i].int32_ = (spectrum_q15 >> 1);
+        *threshold_initialized = 1;
+      }
+    }
+  }
+  for (i = kBandFirst; i <= kBandLast; i++) {
+    // Convert input spectrum from Q(|q_domain|) to Q15.
+    int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain);
+    // Update the |threshold_spectrum|.
+    WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_));
+    // Convert |spectrum| at current frequency bin to a binary value.
+    if (spectrum_q15 > threshold_spectrum[i].int32_) {
+      out = SetBit(out, i - kBandFirst);
+    }
+  }
+
+  return out;
+}
+
+static uint32_t BinarySpectrumFloat(float* spectrum,
+                                    SpectrumType* threshold_spectrum,
+                                    int* threshold_initialized) {
+  int i = kBandFirst;
+  uint32_t out = 0;
+  const float kScale = 1 / 64.0;
+
+  if (!(*threshold_initialized)) {
+    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // value. This speeds up the convergence.
+    for (i = kBandFirst; i <= kBandLast; i++) {
+      if (spectrum[i] > 0.0f) {
+        threshold_spectrum[i].float_ = (spectrum[i] / 2);
+        *threshold_initialized = 1;
+      }
+    }
+  }
+
+  for (i = kBandFirst; i <= kBandLast; i++) {
+    // Update the |threshold_spectrum|.
+    MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_));
+    // Convert |spectrum| at current frequency bin to a binary value.
+    if (spectrum[i] > threshold_spectrum[i].float_) {
+      out = SetBit(out, i - kBandFirst);
+    }
+  }
+
+  return out;
+}
+
+int WebRtc_FreeDelayEstimator(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  if (self->mean_far_spectrum != NULL) {
+    free(self->mean_far_spectrum);
+    self->mean_far_spectrum = NULL;
+  }
+  if (self->mean_near_spectrum != NULL) {
+    free(self->mean_near_spectrum);
+    self->mean_near_spectrum = NULL;
+  }
+
+  WebRtc_FreeBinaryDelayEstimator(self->binary_handle);
+
+  free(self);
+
+  return 0;
+}
+
+int WebRtc_CreateDelayEstimator(void** handle,
+                                int spectrum_size,
+                                int max_delay,
+                                int lookahead) {
+  DelayEstimator* self = NULL;
+
+  // Check if the sub band used in the delay estimation is small enough to fit
+  // the binary spectra in a uint32_t.
+  assert(kBandLast - kBandFirst < 32);
+
+  if (handle == NULL) {
+    return -1;
+  }
+  if (spectrum_size < kBandLast) {
+    return -1;
+  }
+
+  self = malloc(sizeof(DelayEstimator));
+  *handle = self;
+  if (self == NULL) {
+    return -1;
+  }
+
+  self->mean_far_spectrum = NULL;
+  self->mean_near_spectrum = NULL;
+
+  // Create binary delay estimator.
+  if (WebRtc_CreateBinaryDelayEstimator(&self->binary_handle,
+                                        max_delay,
+                                        lookahead) != 0) {
+    WebRtc_FreeDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  // Allocate memory for spectrum buffers.
+  self->mean_far_spectrum = malloc(spectrum_size * sizeof(SpectrumType));
+  if (self->mean_far_spectrum == NULL) {
+    WebRtc_FreeDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+  self->mean_near_spectrum = malloc(spectrum_size * sizeof(SpectrumType));
+  if (self->mean_near_spectrum == NULL) {
+    WebRtc_FreeDelayEstimator(self);
+    self = NULL;
+    return -1;
+  }
+
+  self->spectrum_size = spectrum_size;
+
+  return 0;
+}
+
+int WebRtc_InitDelayEstimator(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  // Initialize binary delay estimator.
+  if (WebRtc_InitBinaryDelayEstimator(self->binary_handle) != 0) {
+    return -1;
+  }
+  // Set averaged far and near end spectra to zero.
+  memset(self->mean_far_spectrum, 0,
+         sizeof(SpectrumType) * self->spectrum_size);
+  memset(self->mean_near_spectrum, 0,
+         sizeof(SpectrumType) * self->spectrum_size);
+  // Reset initialization indicators.
+  self->far_spectrum_initialized = 0;
+  self->near_spectrum_initialized = 0;
+
+  return 0;
+}
+
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+                                    uint16_t* far_spectrum,
+                                    uint16_t* near_spectrum,
+                                    int spectrum_size,
+                                    int far_q,
+                                    int near_q) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  uint32_t binary_far_spectrum = 0;
+  uint32_t binary_near_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (far_spectrum == NULL) {
+    // Empty far end spectrum.
+    return -1;
+  }
+  if (near_spectrum == NULL) {
+    // Empty near end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+  if (far_q > 15) {
+    // If |far_q| is larger than 15 we cannot guarantee no wrap around.
+    return -1;
+  }
+  if (near_q > 15) {
+    // If |near_q| is larger than 15 we cannot guarantee no wrap around.
+    return -1;
+  }
+
+  // Get binary spectra.
+  binary_far_spectrum = BinarySpectrumFix(far_spectrum,
+                                          self->mean_far_spectrum,
+                                          far_q,
+                                          &(self->far_spectrum_initialized));
+  binary_near_spectrum = BinarySpectrumFix(near_spectrum,
+                                           self->mean_near_spectrum,
+                                           near_q,
+                                           &(self->near_spectrum_initialized));
+
+  return WebRtc_ProcessBinarySpectrum(self->binary_handle,
+                                      binary_far_spectrum,
+                                      binary_near_spectrum);
+}
+
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+                                      float* far_spectrum,
+                                      float* near_spectrum,
+                                      int spectrum_size) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  uint32_t binary_far_spectrum = 0;
+  uint32_t binary_near_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (far_spectrum == NULL) {
+    // Empty far end spectrum.
+    return -1;
+  }
+  if (near_spectrum == NULL) {
+    // Empty near end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+
+  // Get binary spectra.
+  binary_far_spectrum = BinarySpectrumFloat(far_spectrum,
+                                            self->mean_far_spectrum,
+                                            &(self->far_spectrum_initialized));
+  binary_near_spectrum = BinarySpectrumFloat(near_spectrum,
+                                             self->mean_near_spectrum,
+                                             &(self->near_spectrum_initialized));
+
+  return WebRtc_ProcessBinarySpectrum(self->binary_handle,
+                                      binary_far_spectrum,
+                                      binary_near_spectrum);
+}
+
+int WebRtc_last_delay(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  return WebRtc_binary_last_delay(self->binary_handle);
+}
diff --git a/src/modules/audio_processing/utility/delay_estimator_wrapper.h b/src/modules/audio_processing/utility/delay_estimator_wrapper.h
new file mode 100644
index 0000000..2a47b5d
--- /dev/null
+++ b/src/modules/audio_processing/utility/delay_estimator_wrapper.h
@@ -0,0 +1,110 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on block by block basis.
+// The return value is 0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+
+#include "typedefs.h"
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimator(...)
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+//
+int WebRtc_FreeDelayEstimator(void* handle);
+
+// Allocates the memory needed by the delay estimation. The memory needs to be
+// initialized separately through WebRtc_InitDelayEstimator(...).
+//
+// Inputs:
+//      - handle        : Instance that should be created.
+//      - spectrum_size : Size of the spectrum used both in far-end and
+//                        near-end. Used to allocate memory for spectrum
+//                        specific buffers.
+//      - max_delay     : The maximum delay which can be estimated. Needed to
+//                        allocate memory for history buffers.
+//      - lookahead     : Amount of non-causal lookahead to use. This can
+//                        detect cases in which a near-end signal occurs before
+//                        the corresponding far-end signal. It will delay the
+//                        estimate for the current block by an equal amount,
+//                        and the returned values will be offset by it.
+//
+//                        A value of zero is the typical no-lookahead case.
+//                        This also represents the minimum delay which can be
+//                        estimated.
+//
+// Output:
+//      - handle        : Created instance.
+//
+int WebRtc_CreateDelayEstimator(void** handle,
+                                int spectrum_size,
+                                int max_delay,
+                                int lookahead);
+
+// Initializes the delay estimation instance created with
+// WebRtc_CreateDelayEstimator(...)
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+//
+// Output:
+//      - handle        : Initialized instance.
+//
+int WebRtc_InitDelayEstimator(void* handle);
+
+// Estimates and returns the delay between the far-end and near-end blocks. The
+// value will be offset by the lookahead (i.e. the lookahead should be
+// subtracted from the returned value).
+// Inputs:
+//      - handle        : Pointer to the delay estimation instance.
+//      - far_spectrum  : Pointer to the far-end spectrum data.
+//      - near_spectrum : Pointer to the near-end spectrum data of the current
+//                        block.
+//      - spectrum_size : The size of the data arrays (same for both far- and
+//                        near-end).
+//      - far_q         : The Q-domain of the far-end data.
+//      - near_q        : The Q-domain of the near-end data.
+//
+// Output:
+//      - handle        : Updated instance.
+//
+// Return value:
+//      - delay         :  >= 0 - Calculated delay value.
+//                        -1    - Error.
+//                        -2    - Insufficient data for estimation.
+//
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+                                    uint16_t* far_spectrum,
+                                    uint16_t* near_spectrum,
+                                    int spectrum_size,
+                                    int far_q,
+                                    int near_q);
+
+// See WebRtc_DelayEstimatorProcessFix() for description.
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+                                      float* far_spectrum,
+                                      float* near_spectrum,
+                                      int spectrum_size);
+
+// Returns the last calculated delay updated by
+// WebRtc_DelayEstimatorProcessFix(...) or WebRtc_DelayEstimatorProcessFloat().
+//
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+//
+// Return value:
+//      - delay         :  >= 0 - Last calculated delay value.
+//                        -1    - Error.
+//                        -2    - Insufficient data for estimation.
+//
+int WebRtc_last_delay(void* handle);
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
diff --git a/src/modules/audio_processing/utility/fft4g.c b/src/modules/audio_processing/utility/fft4g.c
index 9a84368..cbc4dc3 100644
--- a/src/modules/audio_processing/utility/fft4g.c
+++ b/src/modules/audio_processing/utility/fft4g.c
@@ -286,14 +286,24 @@
     w[] and ip[] are compatible with all routines.
 */
 
-void cdft(int n, int isgn, float *a, int *ip, float *w)
-{
-    void makewt(int nw, int *ip, float *w);
-    void bitrv2(int n, int *ip, float *a);
-    void bitrv2conj(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void cftbsub(int n, float *a, float *w);
+static void makewt(int nw, int *ip, float *w);
+static void makect(int nc, int *ip, float *c);
+static void bitrv2(int n, int *ip, float *a);
+static void bitrv2conj(int n, int *ip, float *a);
+static void cftfsub(int n, float *a, float *w);
+static void cftbsub(int n, float *a, float *w);
+static void cft1st(int n, float *a, float *w);
+static void cftmdl(int n, int l, float *a, float *w);
+static void rftfsub(int n, float *a, int nc, float *c);
+static void rftbsub(int n, float *a, int nc, float *c);
+#if 0  // Not used.
+static void dctsub(int n, float *a, int nc, float *c)
+static void dstsub(int n, float *a, int nc, float *c)
+#endif
 
+
+void WebRtc_cdft(int n, int isgn, float *a, int *ip, float *w)
+{
     if (n > (ip[0] << 2)) {
         makewt(n >> 2, ip, w);
     }
@@ -311,15 +321,8 @@
 }
 
 
-void rdft(int n, int isgn, float *a, int *ip, float *w)
+void WebRtc_rdft(int n, int isgn, float *a, int *ip, float *w)
 {
-    void makewt(int nw, int *ip, float *w);
-    void makect(int nc, int *ip, float *c);
-    void bitrv2(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void cftbsub(int n, float *a, float *w);
-    void rftfsub(int n, float *a, int nc, float *c);
-    void rftbsub(int n, float *a, int nc, float *c);
     int nw, nc;
     float xi;
 
@@ -357,17 +360,9 @@
     }
 }
 
-
-void ddct(int n, int isgn, float *a, int *ip, float *w)
+#if 0  // Not used.
+static void ddct(int n, int isgn, float *a, int *ip, float *w)
 {
-    void makewt(int nw, int *ip, float *w);
-    void makect(int nc, int *ip, float *c);
-    void bitrv2(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void cftbsub(int n, float *a, float *w);
-    void rftfsub(int n, float *a, int nc, float *c);
-    void rftbsub(int n, float *a, int nc, float *c);
-    void dctsub(int n, float *a, int nc, float *c);
     int j, nw, nc;
     float xr;
 
@@ -417,16 +412,8 @@
 }
 
 
-void ddst(int n, int isgn, float *a, int *ip, float *w)
+static void ddst(int n, int isgn, float *a, int *ip, float *w)
 {
-    void makewt(int nw, int *ip, float *w);
-    void makect(int nc, int *ip, float *c);
-    void bitrv2(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void cftbsub(int n, float *a, float *w);
-    void rftfsub(int n, float *a, int nc, float *c);
-    void rftbsub(int n, float *a, int nc, float *c);
-    void dstsub(int n, float *a, int nc, float *c);
     int j, nw, nc;
     float xr;
 
@@ -476,14 +463,8 @@
 }
 
 
-void dfct(int n, float *a, float *t, int *ip, float *w)
+static void dfct(int n, float *a, float *t, int *ip, float *w)
 {
-    void makewt(int nw, int *ip, float *w);
-    void makect(int nc, int *ip, float *c);
-    void bitrv2(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void rftfsub(int n, float *a, int nc, float *c);
-    void dctsub(int n, float *a, int nc, float *c);
     int j, k, l, m, mh, nw, nc;
     float xr, xi, yr, yi;
 
@@ -571,15 +552,8 @@
     }
 }
 
-
-void dfst(int n, float *a, float *t, int *ip, float *w)
+static void dfst(int n, float *a, float *t, int *ip, float *w)
 {
-    void makewt(int nw, int *ip, float *w);
-    void makect(int nc, int *ip, float *c);
-    void bitrv2(int n, int *ip, float *a);
-    void cftfsub(int n, float *a, float *w);
-    void rftfsub(int n, float *a, int nc, float *c);
-    void dstsub(int n, float *a, int nc, float *c);
     int j, k, l, m, mh, nw, nc;
     float xr, xi, yr, yi;
 
@@ -657,6 +631,7 @@
     }
     a[0] = 0;
 }
+#endif  // Not used.
 
 
 /* -------- initializing routines -------- */
@@ -664,9 +639,8 @@
 
 #include <math.h>
 
-void makewt(int nw, int *ip, float *w)
+static void makewt(int nw, int *ip, float *w)
 {
-    void bitrv2(int n, int *ip, float *a);
     int j, nwh;
     float delta, x, y;
 
@@ -694,7 +668,7 @@
 }
 
 
-void makect(int nc, int *ip, float *c)
+static void makect(int nc, int *ip, float *c)
 {
     int j, nch;
     float delta;
@@ -716,7 +690,7 @@
 /* -------- child routines -------- */
 
 
-void bitrv2(int n, int *ip, float *a)
+static void bitrv2(int n, int *ip, float *a)
 {
     int j, j1, k, k1, l, m, m2;
     float xr, xi, yr, yi;
@@ -816,7 +790,7 @@
 }
 
 
-void bitrv2conj(int n, int *ip, float *a)
+static void bitrv2conj(int n, int *ip, float *a)
 {
     int j, j1, k, k1, l, m, m2;
     float xr, xi, yr, yi;
@@ -925,10 +899,8 @@
 }
 
 
-void cftfsub(int n, float *a, float *w)
+static void cftfsub(int n, float *a, float *w)
 {
-    void cft1st(int n, float *a, float *w);
-    void cftmdl(int n, int l, float *a, float *w);
     int j, j1, j2, j3, l;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
@@ -977,10 +949,8 @@
 }
 
 
-void cftbsub(int n, float *a, float *w)
+static void cftbsub(int n, float *a, float *w)
 {
-    void cft1st(int n, float *a, float *w);
-    void cftmdl(int n, int l, float *a, float *w);
     int j, j1, j2, j3, l;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
@@ -1029,7 +999,7 @@
 }
 
 
-void cft1st(int n, float *a, float *w)
+static void cft1st(int n, float *a, float *w)
 {
     int j, k1, k2;
     float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
@@ -1134,7 +1104,7 @@
 }
 
 
-void cftmdl(int n, int l, float *a, float *w)
+static void cftmdl(int n, int l, float *a, float *w)
 {
     int j, j1, j2, j3, k, k1, k2, m, m2;
     float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
@@ -1261,7 +1231,7 @@
 }
 
 
-void rftfsub(int n, float *a, int nc, float *c)
+static void rftfsub(int n, float *a, int nc, float *c)
 {
     int j, k, kk, ks, m;
     float wkr, wki, xr, xi, yr, yi;
@@ -1286,7 +1256,7 @@
 }
 
 
-void rftbsub(int n, float *a, int nc, float *c)
+static void rftbsub(int n, float *a, int nc, float *c)
 {
     int j, k, kk, ks, m;
     float wkr, wki, xr, xi, yr, yi;
@@ -1312,8 +1282,8 @@
     a[m + 1] = -a[m + 1];
 }
 
-
-void dctsub(int n, float *a, int nc, float *c)
+#if 0  // Not used.
+static void dctsub(int n, float *a, int nc, float *c)
 {
     int j, k, kk, ks, m;
     float wkr, wki, xr;
@@ -1334,7 +1304,7 @@
 }
 
 
-void dstsub(int n, float *a, int nc, float *c)
+static void dstsub(int n, float *a, int nc, float *c)
 {
     int j, k, kk, ks, m;
     float wkr, wki, xr;
@@ -1353,4 +1323,4 @@
     }
     a[m] *= c[0];
 }
-
+#endif  // Not used.
diff --git a/src/modules/audio_processing/utility/fft4g.h b/src/modules/audio_processing/utility/fft4g.h
index 373ff14..14a52a1 100644
--- a/src/modules/audio_processing/utility/fft4g.h
+++ b/src/modules/audio_processing/utility/fft4g.h
@@ -11,8 +11,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_
 
-void rdft(int, int, float *, int *, float *);
-void cdft(int, int, float *, int *, float *);
+void WebRtc_rdft(int, int, float *, int *, float *);
+void WebRtc_cdft(int, int, float *, int *, float *);
 
 #endif
-
diff --git a/src/modules/audio_processing/utility/ring_buffer.c b/src/modules/audio_processing/utility/ring_buffer.c
index ea2e354..8b2b436 100644
--- a/src/modules/audio_processing/utility/ring_buffer.c
+++ b/src/modules/audio_processing/utility/ring_buffer.c
@@ -8,232 +8,264 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- * Provides a generic ring buffer that can be written to and read from with
- * arbitrarily sized blocks. The AEC uses this for several different tasks.
- */
+// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
+// otherwise specified, functions return 0 on success and -1 on error.
 
-#include <stdlib.h>
-#include <string.h>
 #include "ring_buffer.h"
 
+#include <stddef.h> // size_t
+#include <stdlib.h>
+#include <string.h>
+
+enum Wrap {
+  SAME_WRAP,
+  DIFF_WRAP
+};
+
 typedef struct {
-    int readPos;
-    int writePos;
-    int size;
-    char rwWrap;
-    bufdata_t *data;
+  size_t read_pos;
+  size_t write_pos;
+  size_t element_count;
+  size_t element_size;
+  enum Wrap rw_wrap;
+  char* data;
 } buf_t;
 
-enum {SAME_WRAP, DIFF_WRAP};
+// Get address of region(s) from which we can read data.
+// If the region is contiguous, |data_ptr_bytes_2| will be zero.
+// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
+// region. Returns room available to be read or |element_count|, whichever is
+// smaller.
+static size_t GetBufferReadRegions(buf_t* buf,
+                                   size_t element_count,
+                                   void** data_ptr_1,
+                                   size_t* data_ptr_bytes_1,
+                                   void** data_ptr_2,
+                                   size_t* data_ptr_bytes_2) {
 
-int WebRtcApm_CreateBuffer(void **bufInst, int size)
-{
-    buf_t *buf = NULL;
+  const size_t readable_elements = WebRtc_available_read(buf);
+  const size_t read_elements = (readable_elements < element_count ?
+      readable_elements : element_count);
+  const size_t margin = buf->element_count - buf->read_pos;
 
-    if (size < 0) {
-        return -1;
-    }
+  // Check to see if read is not contiguous.
+  if (read_elements > margin) {
+    // Write data in two blocks that wrap the buffer.
+    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
+    *data_ptr_bytes_1 = margin * buf->element_size;
+    *data_ptr_2 = buf->data;
+    *data_ptr_bytes_2 = (read_elements - margin) * buf->element_size;
+  } else {
+    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
+    *data_ptr_bytes_1 = read_elements * buf->element_size;
+    *data_ptr_2 = NULL;
+    *data_ptr_bytes_2 = 0;
+  }
 
-    buf = malloc(sizeof(buf_t));
-    *bufInst = buf;
-    if (buf == NULL) {
-        return -1;
-    }
+  return read_elements;
+}
 
-    buf->data = malloc(size*sizeof(bufdata_t));
-    if (buf->data == NULL) {
-        free(buf);
-        buf = NULL;
-        return -1;
-    }
+int WebRtc_CreateBuffer(void** handle,
+                        size_t element_count,
+                        size_t element_size) {
+  buf_t* self = NULL;
 
-    buf->size = size;
+  if (handle == NULL) {
+    return -1;
+  }
+
+  self = malloc(sizeof(buf_t));
+  if (self == NULL) {
+    return -1;
+  }
+  *handle = self;
+
+  self->data = malloc(element_count * element_size);
+  if (self->data == NULL) {
+    free(self);
+    self = NULL;
+    return -1;
+  }
+
+  self->element_count = element_count;
+  self->element_size = element_size;
+
+  return 0;
+}
+
+int WebRtc_InitBuffer(void* handle) {
+  buf_t* self = (buf_t*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  self->read_pos = 0;
+  self->write_pos = 0;
+  self->rw_wrap = SAME_WRAP;
+
+  // Initialize buffer to zeros
+  memset(self->data, 0, self->element_count * self->element_size);
+
+  return 0;
+}
+
+int WebRtc_FreeBuffer(void* handle) {
+  buf_t* self = (buf_t*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  free(self->data);
+  free(self);
+
+  return 0;
+}
+
+size_t WebRtc_ReadBuffer(void* handle,
+                         void** data_ptr,
+                         void* data,
+                         size_t element_count) {
+
+  buf_t* self = (buf_t*) handle;
+
+  if (self == NULL) {
     return 0;
-}
-
-int WebRtcApm_InitBuffer(void *bufInst)
-{
-    buf_t *buf = (buf_t*)bufInst;
-
-    buf->readPos = 0;
-    buf->writePos = 0;
-    buf->rwWrap = SAME_WRAP;
-
-    // Initialize buffer to zeros
-    memset(buf->data, 0, sizeof(bufdata_t)*buf->size);
-
+  }
+  if (data == NULL) {
     return 0;
-}
-
-int WebRtcApm_FreeBuffer(void *bufInst)
-{
-    buf_t *buf = (buf_t*)bufInst;
-
-    if (buf == NULL) {
-        return -1;
-    }
-
-    free(buf->data);
-    free(buf);
-
+  }
+  if (data_ptr == NULL) {
     return 0;
+  }
+
+  {
+    void* buf_ptr_1 = NULL;
+    void* buf_ptr_2 = NULL;
+    size_t buf_ptr_bytes_1 = 0;
+    size_t buf_ptr_bytes_2 = 0;
+    const size_t read_count = GetBufferReadRegions(self,
+                                                   element_count,
+                                                   &buf_ptr_1,
+                                                   &buf_ptr_bytes_1,
+                                                   &buf_ptr_2,
+                                                   &buf_ptr_bytes_2);
+
+    if (buf_ptr_bytes_2 > 0) {
+      // We have a wrap around when reading the buffer. Copy the buffer data to
+      // |data| and point to it.
+      memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
+      memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2);
+      *data_ptr = data;
+    } else {
+      *data_ptr = buf_ptr_1;
+    }
+
+    // Update read position
+    WebRtc_MoveReadPtr(handle, (int) read_count);
+
+    return read_count;
+  }
 }
 
-int WebRtcApm_ReadBuffer(void *bufInst, bufdata_t *data, int size)
-{
-    buf_t *buf = (buf_t*)bufInst;
-    int n = 0, margin = 0;
+size_t WebRtc_WriteBuffer(void* handle,
+                          const void* data,
+                          size_t element_count) {
 
-    if (size <= 0 || size > buf->size) {
-        return -1;
+  buf_t* self = (buf_t*) handle;
+
+  if (self == NULL) {
+    return 0;
+  }
+  if (data == NULL) {
+    return 0;
+  }
+
+  {
+    const size_t free_elements = WebRtc_available_write(handle);
+    const size_t write_elements = (free_elements < element_count ? free_elements
+        : element_count);
+    size_t n = write_elements;
+    const size_t margin = self->element_count - self->write_pos;
+
+    if (write_elements > margin) {
+      // Buffer wrap around when writing.
+      memcpy(self->data + self->write_pos * self->element_size,
+             data, margin * self->element_size);
+      self->write_pos = 0;
+      n -= margin;
+      self->rw_wrap = DIFF_WRAP;
     }
+    memcpy(self->data + self->write_pos * self->element_size,
+           ((const char*) data) + ((write_elements - n) * self->element_size),
+           n * self->element_size);
+    self->write_pos += n;
 
-    n = size;
-    if (buf->rwWrap == DIFF_WRAP) {
-        margin = buf->size - buf->readPos;
-        if (n > margin) {
-            buf->rwWrap = SAME_WRAP;
-            memcpy(data, buf->data + buf->readPos,
-                sizeof(bufdata_t)*margin);
-            buf->readPos = 0;
-            n = size - margin;
-        }
-        else {
-            memcpy(data, buf->data + buf->readPos,
-                sizeof(bufdata_t)*n);
-            buf->readPos += n;
-            return n;
-        }
-    }
-
-    if (buf->rwWrap == SAME_WRAP) {
-        margin = buf->writePos - buf->readPos;
-        if (margin > n)
-            margin = n;
-        memcpy(data + size - n, buf->data + buf->readPos,
-            sizeof(bufdata_t)*margin);
-        buf->readPos += margin;
-        n -= margin;
-    }
-
-    return size - n;
+    return write_elements;
+  }
 }
 
-int WebRtcApm_WriteBuffer(void *bufInst, const bufdata_t *data, int size)
-{
-    buf_t *buf = (buf_t*)bufInst;
-    int n = 0, margin = 0;
+int WebRtc_MoveReadPtr(void* handle, int element_count) {
 
-    if (size < 0 || size > buf->size) {
-        return -1;
+  buf_t* self = (buf_t*) handle;
+
+  if (self == NULL) {
+    return 0;
+  }
+
+  {
+    // We need to be able to take care of negative changes, hence use "int"
+    // instead of "size_t".
+    const int free_elements = (int) WebRtc_available_write(handle);
+    const int readable_elements = (int) WebRtc_available_read(handle);
+    int read_pos = (int) self->read_pos;
+
+    if (element_count > readable_elements) {
+      element_count = readable_elements;
+    }
+    if (element_count < -free_elements) {
+      element_count = -free_elements;
     }
 
-    n = size;
-    if (buf->rwWrap == SAME_WRAP) {
-        margin = buf->size - buf->writePos;
-        if (n > margin) {
-            buf->rwWrap = DIFF_WRAP;
-            memcpy(buf->data + buf->writePos, data,
-                sizeof(bufdata_t)*margin);
-            buf->writePos = 0;
-            n = size - margin;
-        }
-        else {
-            memcpy(buf->data + buf->writePos, data,
-                sizeof(bufdata_t)*n);
-            buf->writePos += n;
-            return n;
-        }
+    read_pos += element_count;
+    if (read_pos > (int) self->element_count) {
+      // Buffer wrap around. Restart read position and wrap indicator.
+      read_pos -= (int) self->element_count;
+      self->rw_wrap = SAME_WRAP;
+    }
+    if (read_pos < 0) {
+      // Buffer wrap around. Restart read position and wrap indicator.
+      read_pos += (int) self->element_count;
+      self->rw_wrap = DIFF_WRAP;
     }
 
-    if (buf->rwWrap == DIFF_WRAP) {
-        margin = buf->readPos - buf->writePos;
-        if (margin > n)
-            margin = n;
-        memcpy(buf->data + buf->writePos, data + size - n,
-            sizeof(bufdata_t)*margin);
-        buf->writePos += margin;
-        n -= margin;
-    }
+    self->read_pos = (size_t) read_pos;
 
-    return size - n;
+    return element_count;
+  }
 }
 
-int WebRtcApm_FlushBuffer(void *bufInst, int size)
-{
-    buf_t *buf = (buf_t*)bufInst;
-    int n = 0, margin = 0;
+size_t WebRtc_available_read(const void* handle) {
+  const buf_t* self = (buf_t*) handle;
 
-    if (size <= 0 || size > buf->size) {
-        return -1;
-    }
+  if (self == NULL) {
+    return 0;
+  }
 
-    n = size;
-    if (buf->rwWrap == DIFF_WRAP) {
-        margin = buf->size - buf->readPos;
-        if (n > margin) {
-            buf->rwWrap = SAME_WRAP;
-            buf->readPos = 0;
-            n = size - margin;
-        }
-        else {
-            buf->readPos += n;
-            return n;
-        }
-    }
-
-    if (buf->rwWrap == SAME_WRAP) {
-        margin = buf->writePos - buf->readPos;
-        if (margin > n)
-            margin = n;
-        buf->readPos += margin;
-        n -= margin;
-    }
-
-    return size - n;
+  if (self->rw_wrap == SAME_WRAP) {
+    return self->write_pos - self->read_pos;
+  } else {
+    return self->element_count - self->read_pos + self->write_pos;
+  }
 }
 
-int WebRtcApm_StuffBuffer(void *bufInst, int size)
-{
-    buf_t *buf = (buf_t*)bufInst;
-    int n = 0, margin = 0;
+size_t WebRtc_available_write(const void* handle) {
+  const buf_t* self = (buf_t*) handle;
 
-    if (size <= 0 || size > buf->size) {
-        return -1;
-    }
+  if (self == NULL) {
+    return 0;
+  }
 
-    n = size;
-    if (buf->rwWrap == SAME_WRAP) {
-        margin = buf->readPos;
-        if (n > margin) {
-            buf->rwWrap = DIFF_WRAP;
-            buf->readPos = buf->size - 1;
-            n -= margin + 1;
-        }
-        else {
-            buf->readPos -= n;
-            return n;
-        }
-    }
-
-    if (buf->rwWrap == DIFF_WRAP) {
-        margin = buf->readPos - buf->writePos;
-        if (margin > n)
-            margin = n;
-        buf->readPos -= margin;
-        n -= margin;
-    }
-
-    return size - n;
-}
-
-int WebRtcApm_get_buffer_size(const void *bufInst)
-{
-    const buf_t *buf = (buf_t*)bufInst;
-
-    if (buf->rwWrap == SAME_WRAP)
-        return buf->writePos - buf->readPos;
-    else
-        return buf->size - buf->readPos + buf->writePos;
+  return self->element_count - WebRtc_available_read(handle);
 }
diff --git a/src/modules/audio_processing/utility/ring_buffer.h b/src/modules/audio_processing/utility/ring_buffer.h
index 0fd261d..3c44029 100644
--- a/src/modules/audio_processing/utility/ring_buffer.h
+++ b/src/modules/audio_processing/utility/ring_buffer.h
@@ -8,34 +8,46 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- * Specifies the interface for the AEC generic buffer.
- */
+// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
+// otherwise specified, functions return 0 on success and -1 on error.
 
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
 
-// Determines buffer datatype
-typedef short bufdata_t;
+#include <stddef.h> // size_t
 
-// Unless otherwise specified, functions return 0 on success and -1 on error
-int WebRtcApm_CreateBuffer(void **bufInst, int size);
-int WebRtcApm_InitBuffer(void *bufInst);
-int WebRtcApm_FreeBuffer(void *bufInst);
+int WebRtc_CreateBuffer(void** handle,
+                        size_t element_count,
+                        size_t element_size);
+int WebRtc_InitBuffer(void* handle);
+int WebRtc_FreeBuffer(void* handle);
 
-// Returns number of samples read
-int WebRtcApm_ReadBuffer(void *bufInst, bufdata_t *data, int size);
+// Reads data from the buffer. The |data_ptr| will point to the address where
+// it is located. If all |element_count| data are feasible to read without
+// buffer wrap around |data_ptr| will point to the location in the buffer.
+// Otherwise, the data will be copied to |data| (memory allocation done by the
+// user) and |data_ptr| points to the address of |data|. |data_ptr| is only
+// guaranteed to be valid until the next call to WebRtc_WriteBuffer().
+// Returns number of elements read.
+size_t WebRtc_ReadBuffer(void* handle,
+                         void** data_ptr,
+                         void* data,
+                         size_t element_count);
 
-// Returns number of samples written
-int WebRtcApm_WriteBuffer(void *bufInst, const bufdata_t *data, int size);
+// Writes |data| to buffer and returns the number of elements written.
+size_t WebRtc_WriteBuffer(void* handle, const void* data, size_t element_count);
 
-// Returns number of samples flushed
-int WebRtcApm_FlushBuffer(void *bufInst, int size);
+// Moves the buffer read position and returns the number of elements moved.
+// Positive |element_count| moves the read position towards the write position,
+// that is, flushing the buffer. Negative |element_count| moves the read
+// position away from the the write position, that is, stuffing the buffer.
+// Returns number of elements moved.
+int WebRtc_MoveReadPtr(void* handle, int element_count);
 
-// Returns number of samples stuffed
-int WebRtcApm_StuffBuffer(void *bufInst, int size);
+// Returns number of available elements to read.
+size_t WebRtc_available_read(const void* handle);
 
-// Returns number of samples in buffer
-int WebRtcApm_get_buffer_size(const void *bufInst);
+// Returns number of available elements for write.
+size_t WebRtc_available_write(const void* handle);
 
 #endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_RING_BUFFER_H_
diff --git a/src/modules/audio_processing/utility/util.gyp b/src/modules/audio_processing/utility/util.gypi
similarity index 77%
rename from src/modules/audio_processing/utility/util.gyp
rename to src/modules/audio_processing/utility/util.gypi
index 3348da8..3c3024a 100644
--- a/src/modules/audio_processing/utility/util.gyp
+++ b/src/modules/audio_processing/utility/util.gypi
@@ -7,23 +7,27 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../../common_settings.gypi',
-  ],
   'targets': [
     {
       'target_name': 'apm_util',
       'type': '<(library)',
+      'dependencies': [
+        '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+      ],
       'direct_dependent_settings': {
         'include_dirs': [
           '.',
         ],
       },
       'sources': [
-        'ring_buffer.c',
-        'ring_buffer.h',
+        'delay_estimator.c',
+        'delay_estimator.h',
+        'delay_estimator_wrapper.c',
+        'delay_estimator_wrapper.h',
         'fft4g.c',
         'fft4g.h',
+        'ring_buffer.c',
+        'ring_buffer.h',
       ],
     },
   ],
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.cc b/src/modules/audio_processing/voice_detection_impl.cc
similarity index 92%
rename from src/modules/audio_processing/main/source/voice_detection_impl.cc
rename to src/modules/audio_processing/voice_detection_impl.cc
index 3eb446e..49aac2e 100644
--- a/src/modules/audio_processing/main/source/voice_detection_impl.cc
+++ b/src/modules/audio_processing/voice_detection_impl.cc
@@ -74,16 +74,16 @@
 
   // TODO(ajm): concatenate data in frame buffer here.
 
-  int vad_ret_val;
-  vad_ret_val = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
-                      apm_->split_sample_rate_hz(),
-                      mixed_data,
-                      frame_size_samples_);
-
-  if (vad_ret_val == 0) {
+  int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
+                                  apm_->split_sample_rate_hz(),
+                                  mixed_data,
+                                  frame_size_samples_);
+  if (vad_ret == 0) {
     stream_has_voice_ = false;
-  } else if (vad_ret_val == 1) {
+    audio->set_activity(AudioFrame::kVadPassive);
+  } else if (vad_ret == 1) {
     stream_has_voice_ = true;
+    audio->set_activity(AudioFrame::kVadActive);
   } else {
     return apm_->kUnspecifiedError;
   }
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.h b/src/modules/audio_processing/voice_detection_impl.h
similarity index 100%
rename from src/modules/audio_processing/main/source/voice_detection_impl.h
rename to src/modules/audio_processing/voice_detection_impl.h
diff --git a/src/modules/interface/module.h b/src/modules/interface/module.h
index f270978..a274d95 100644
--- a/src/modules/interface/module.h
+++ b/src/modules/interface/module.h
@@ -1,33 +1,65 @@
-#ifndef MODULE_H
-#define MODULE_H
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INTERFACE_MODULE_H_
+#define MODULES_INTERFACE_MODULE_H_
+
+#include <assert.h>
 
 #include "typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class Module
-{
-public:
-    // Returns version of the module and its components.
-    virtual int32_t Version(char* version,
-                            uint32_t& remainingBufferInBytes,
-                            uint32_t& position) const = 0;
+class Module {
+ public:
+  // Change the unique identifier of this object.
+  virtual int32_t ChangeUniqueId(const int32_t id) = 0;
 
-    // Change the unique identifier of this object.
-    virtual int32_t ChangeUniqueId(const int32_t id) = 0;
+  // Returns the number of milliseconds until the module want a worker
+  // thread to call Process.
+  virtual int32_t TimeUntilNextProcess() = 0;
 
-    // Returns the number of milliseconds until the module want a worker
-    // thread to call Process.
-    virtual int32_t TimeUntilNextProcess() = 0 ;
+  // Process any pending tasks such as timeouts.
+  virtual int32_t Process() = 0;
 
-    // Process any pending tasks such as timeouts.
-    virtual int32_t Process() = 0 ;
-
-protected:
-    virtual ~Module() {}
+ protected:
+  virtual ~Module() {}
 };
 
-} // namespace webrtc
+// Reference counted version of the module interface.
+class RefCountedModule : public Module {
+ public:
+  // Increase the reference count by one.
+  // Returns the incremented reference count.
+  // TODO(perkj): Make this pure virtual when Chromium have implemented  
+  // reference counting ADM and Video capture module.
+  virtual int32_t AddRef() {
+    assert(!"Not implemented.");
+    return 1;
+  }
 
-#endif // MODULE_H
+  // Decrease the reference count by one.
+  // Returns the decreased reference count.
+  // Returns 0 if the last reference was just released.
+  // When the reference count reach 0 the object will self-destruct.
+  // TODO(perkj): Make this pure virtual when Chromium have implemented  
+  // reference counting ADM and Video capture module.
+  virtual int32_t Release() {
+    assert(!"Not implemented.");
+    return 1;
+  }
+
+ protected:
+  virtual ~RefCountedModule() {}
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_INTERFACE_MODULE_H_
diff --git a/src/modules/interface/module_common_types.h b/src/modules/interface/module_common_types.h
index 1865b0f..0319dfe 100644
--- a/src/modules/interface/module_common_types.h
+++ b/src/modules/interface/module_common_types.h
@@ -27,6 +27,11 @@
     WebRtc_UWord16 headerLength;
 };
 
+struct RTPHeaderExtension
+{
+    WebRtc_Word32  transmissionTimeOffset;
+};
+
 struct RTPAudioHeader
 {
     WebRtc_UWord8  numEnergy;                         // number of valid entries in arrOfEnergy
@@ -37,18 +42,48 @@
 
 struct RTPVideoHeaderH263
 {
+    void InitRTPVideoHeaderH263() {};
     bool independentlyDecodable;  // H.263-1998 if no P bit it's not independently decodable
     bool bits;                    // H.263 mode B, Xor the lasy byte of previus packet with the
                                   // first byte of this packet
 };
+
 enum {kNoPictureId = -1};
+enum {kNoTl0PicIdx = -1};
+enum {kNoTemporalIdx = -1};
+enum {kNoKeyIdx = -1};
+enum {kNoSimulcastIdx = 0};
+
 struct RTPVideoHeaderVP8
 {
-    bool           startBit;        // Start of partition.
-    bool           stopBit;         // Stop of partition.
-    WebRtc_Word16  pictureId;       // Picture ID index, 15 bits;
-                                    // kNoPictureId if PictureID does not exist.
-    bool           nonReference;    // Frame is discardable.
+    void InitRTPVideoHeaderVP8()
+    {
+        nonReference = false;
+        pictureId = kNoPictureId;
+        tl0PicIdx = kNoTl0PicIdx;
+        temporalIdx = kNoTemporalIdx;
+        layerSync = false;
+        keyIdx = kNoKeyIdx;
+        partitionId = 0;
+        beginningOfPartition = false;
+        frameWidth = 0;
+        frameHeight = 0;
+    }
+
+    bool           nonReference;   // Frame is discardable.
+    WebRtc_Word16  pictureId;      // Picture ID index, 15 bits;
+                                   // kNoPictureId if PictureID does not exist.
+    WebRtc_Word16  tl0PicIdx;      // TL0PIC_IDX, 8 bits;
+                                   // kNoTl0PicIdx means no value provided.
+    WebRtc_Word8   temporalIdx;    // Temporal layer index, or kNoTemporalIdx.
+    bool           layerSync;      // This frame is a layer sync frame.
+                                   // Disabled if temporalIdx == kNoTemporalIdx.
+    int            keyIdx;         // 5 bits; kNoKeyIdx means not used.
+    int            partitionId;    // VP8 partition ID
+    bool           beginningOfPartition;  // True if this packet is the first
+                                          // in a VP8 partition. Otherwise false
+    int            frameWidth;     // Exists for key frames.
+    int            frameHeight;    // Exists for key frames.
 };
 union RTPVideoTypeHeader
 {
@@ -72,6 +107,8 @@
     WebRtc_UWord16          height;
 
     bool                    isFirstPacket;   // first packet in frame
+    WebRtc_UWord8           simulcastIdx;    // Index if the simulcast encoder creating
+                                             // this frame, 0 if not using simulcast.
     RTPVideoCodecTypes      codec;
     RTPVideoTypeHeader      codecHeader;
 };
@@ -86,6 +123,7 @@
     RTPHeader       header;
     FrameType       frameType;
     RTPTypeHeader   type;
+    RTPHeaderExtension extension;
 };
 
 class RTPFragmentationHeader
@@ -360,11 +398,11 @@
 struct VideoContentMetrics
 {
     VideoContentMetrics(): motionMagnitudeNZ(0), sizeZeroMotion(0), spatialPredErr(0),
-            spatialPredErrH(0), spatialPredErrV(0), motionPredErr(0), 
+            spatialPredErrH(0), spatialPredErrV(0), motionPredErr(0),
             motionHorizontalness(0), motionClusterDistortion(0),
             nativeWidth(0), nativeHeight(0), contentChange(false) {   }
     void Reset(){ motionMagnitudeNZ = 0; sizeZeroMotion = 0; spatialPredErr = 0;
-            spatialPredErrH = 0; spatialPredErrV = 0; motionPredErr = 0; 
+            spatialPredErrH = 0; spatialPredErrV = 0; motionPredErr = 0;
             motionHorizontalness = 0; motionClusterDistortion = 0;
             nativeWidth = 0; nativeHeight = 0; contentChange = false; }
 
@@ -697,7 +735,7 @@
         const WebRtc_UWord32 timeStamp,
         const WebRtc_Word16* payloadData,
         const WebRtc_UWord16 payloadDataLengthInSamples,
-        const WebRtc_UWord32 frequencyInHz,
+        const int frequencyInHz,
         const SpeechType     speechType,
         const VADActivity    vadActivity,
         const WebRtc_UWord8  audioChannel = 1,
@@ -719,7 +757,7 @@
     // Supporting Stereo, stereo samples are interleaved
     mutable WebRtc_Word16 _payloadData[kMaxAudioFrameSizeSamples];
     WebRtc_UWord16 _payloadDataLengthInSamples;
-    WebRtc_UWord32 _frequencyInHz;
+    int _frequencyInHz;
     WebRtc_UWord8  _audioChannel;
     SpeechType   _speechType;
     VADActivity  _vadActivity;
@@ -756,7 +794,7 @@
     const WebRtc_UWord32 timeStamp,
     const WebRtc_Word16* payloadData,
     const WebRtc_UWord16 payloadDataLengthInSamples,
-    const WebRtc_UWord32 frequencyInHz,
+    const int frequencyInHz,
     const SpeechType     speechType,
     const VADActivity    vadActivity,
     const WebRtc_UWord8  audioChannel,
diff --git a/src/system_wrappers/OWNERS b/src/system_wrappers/OWNERS
index 32dcbbc..4091a93 100644
--- a/src/system_wrappers/OWNERS
+++ b/src/system_wrappers/OWNERS
@@ -1,7 +1,7 @@
-hellner@google.com
-pwestin@google.com
-perkj@google.com
-henrika@google.com
-grunell@google.com
-mflodman@google.com
-niklase@google.com
\ No newline at end of file
+henrike@webrtc.org
+pwestin@webrtc.org
+perkj@webrtc.org
+henrika@webrtc.org
+henrikg@webrtc.org
+mflodman@webrtc.org
+niklas.enbom@webrtc.org
\ No newline at end of file
diff --git a/src/system_wrappers/interface/cpu_features_wrapper.h b/src/system_wrappers/interface/cpu_features_wrapper.h
index 5d8a828..d949592 100644
--- a/src/system_wrappers/interface/cpu_features_wrapper.h
+++ b/src/system_wrappers/interface/cpu_features_wrapper.h
@@ -15,18 +15,33 @@
 extern "C" {
 #endif
 
-// list of features.
+#include <typedefs.h>
+
+// List of features in x86.
 typedef enum {
   kSSE2,
   kSSE3
 } CPUFeature;
 
+// List of features in ARM.
+enum {
+  kCPUFeatureARMv7       = (1 << 0),
+  kCPUFeatureVFPv3       = (1 << 1),
+  kCPUFeatureNEON        = (1 << 2),
+  kCPUFeatureLDREXSTREX  = (1 << 3)
+};
+
 typedef int (*WebRtc_CPUInfo)(CPUFeature feature);
 // returns true if the CPU supports the feature.
 extern WebRtc_CPUInfo WebRtc_GetCPUInfo;
 // No CPU feature is available => straight C path.
 extern WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM;
 
+// Return the features in an ARM device.
+// It detects the features in the hardware platform, and returns supported 
+// values in the above enum definition as a bitmask.
+extern uint64_t WebRtc_GetCPUFeaturesARM(void);
+
 #if defined(__cplusplus) || defined(c_plusplus)
 }    // extern "C"
 #endif
diff --git a/src/system_wrappers/interface/cpu_info.h b/src/system_wrappers/interface/cpu_info.h
new file mode 100644
index 0000000..a6da29f
--- /dev/null
+++ b/src/system_wrappers/interface/cpu_info.h
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_INFO_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_INFO_H_
+
+#include "typedefs.h"
+
+namespace webrtc {
+class CpuInfo
+{
+public:
+    static WebRtc_UWord32 DetectNumberOfCores();
+
+private:
+    CpuInfo() {}
+    static WebRtc_UWord32 _numberOfCores;
+};
+} // namespace webrtc
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_INFO_H_
diff --git a/src/system_wrappers/interface/cpu_wrapper.h b/src/system_wrappers/interface/cpu_wrapper.h
index b72c20c..d938741 100644
--- a/src/system_wrappers/interface/cpu_wrapper.h
+++ b/src/system_wrappers/interface/cpu_wrapper.h
@@ -17,8 +17,6 @@
 class CpuWrapper
 {
 public:
-    static WebRtc_UWord32 DetectNumberOfCores();
-
     static CpuWrapper* CreateCpu();
     virtual ~CpuWrapper() {}
 
@@ -34,6 +32,14 @@
     // Note that the pointer passed as cpu_usage is redirected to a local member
     // of the CPU wrapper.
     // numCores is the number of cores in the cpu_usage array.
+    // The return value is -1 for failure or 0-100, indicating the average
+    // CPU usage across all cores.
+    // Note: on some OSs this class is initialized lazily. This means that it
+    // might not yet be possible to retrieve any CPU metrics. When this happens
+    // the return value will be zero (indicating that there is no failure),
+    // numCores will be 0 and cpu_usage will be set to NULL (indicating that
+    // no metrics are available yet). Once the initialization is completed,
+    // which can take in the order of seconds, CPU metrics can be retrieved.
     virtual WebRtc_Word32 CpuUsageMultiCore(WebRtc_UWord32& numCores,
                                             WebRtc_UWord32*& cpu_usage) = 0;
 
@@ -42,10 +48,6 @@
 
 protected:
     CpuWrapper() {}
-
-private:
-    static WebRtc_UWord32 _numberOfCores;
-
 };
 } // namespace webrtc
 #endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_WRAPPER_H_
diff --git a/src/system_wrappers/interface/critical_section_wrapper.h b/src/system_wrappers/interface/critical_section_wrapper.h
index ad31497..cfec9ae 100644
--- a/src/system_wrappers/interface/critical_section_wrapper.h
+++ b/src/system_wrappers/interface/critical_section_wrapper.h
@@ -33,18 +33,26 @@
     virtual void Leave() = 0;
 };
 
-// RAII extension of the critical section. Prevents Enter/Leave missmatches and
+// RAII extension of the critical section. Prevents Enter/Leave mismatches and
 // provides more compact critical section syntax.
 class CriticalSectionScoped
 {
 public:
-    CriticalSectionScoped(CriticalSectionWrapper& critsec)
-        :
-        _ptrCritSec(&critsec)
+    // Deprecated, don't add more users of this constructor.
+    // TODO(mflodman) Remove this version of the constructor when no one is
+    // using it any longer.
+    explicit CriticalSectionScoped(CriticalSectionWrapper& critsec)
+        : _ptrCritSec(&critsec)
     {
         _ptrCritSec->Enter();
     }
 
+    explicit CriticalSectionScoped(CriticalSectionWrapper* critsec)
+        : _ptrCritSec(critsec)
+    {
+      _ptrCritSec->Enter();
+    }
+
     ~CriticalSectionScoped()
     {
         if (_ptrCritSec)
diff --git a/src/system_wrappers/interface/data_log.h b/src/system_wrappers/interface/data_log.h
new file mode 100644
index 0000000..6fc1d64
--- /dev/null
+++ b/src/system_wrappers/interface/data_log.h
@@ -0,0 +1,121 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This singleton can be used for logging data for offline processing. Data
+ * logged with it can conveniently be parsed and processed with e.g. Matlab.
+ *
+ * Following is an example of the log file format, starting with the header
+ * row at line 1, and the data rows following.
+ * col1,col2,col3,multi-value-col4[3],,,col5
+ * 123,10.2,-243,1,2,3,100
+ * 241,12.3,233,1,2,3,200
+ * 13,16.4,-13,1,2,3,300
+ *
+ * As can be seen in the example, a multi-value-column is specified with the
+ * name followed by the number of elements it contains. This is followed by
+ * the number of elements - 1 empty columns.
+ *
+ * Without multi-value-columns this format can be read natively by Matlab. With
+ * multi-value-columns a small Matlab script is needed, available at
+ * trunk/tools/matlab/parseLog.m.
+ *
+ * Table names and column names are case sensitive.
+ */
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_H_
+
+#include <string>
+
+#include "data_log_impl.h"
+
+namespace webrtc {
+
+class DataLog {
+ public:
+  // Creates a log which uses a separate thread (referred to as the file
+  // writer thread) for writing log rows to file.
+  //
+  // Calls to this function after the log object has been created will only
+  // increment the reference counter.
+  static int CreateLog();
+
+  // Decrements the reference counter and deletes the log when the counter
+  // reaches 0. Should be called equal number of times as successful calls to
+  // CreateLog or memory leak will occur.
+  static void ReturnLog();
+
+  // Combines the string table_name and the integer table_id into a new string
+  // table_name + _ + table_id. The new string will be lower-case.
+  static std::string Combine(const std::string& table_name, int table_id);
+
+  // Adds a new table, with the name table_name, and creates the file, with the
+  // name table_name + ".txt", to which the table will be written.
+  // table_name is treated in a case sensitive way.
+  static int AddTable(const std::string& table_name);
+
+  // Adds a new column to a table. The column will be a multi-value-column
+  // if multi_value_length is greater than 1.
+  // table_name and column_name are treated in a case sensitive way.
+  static int AddColumn(const std::string& table_name,
+                       const std::string& column_name,
+                       int multi_value_length);
+
+  // Inserts a single value into a table with name table_name at the column with
+  // name column_name.
+  // Note that the ValueContainer makes use of the copy constructor,
+  // operator= and operator<< of the type T, and that the template type must
+  // implement a deep-copying copy constructor and operator=.
+  // Copy constructor and operator= must not be disabled for the type T.
+  // table_name and column_name are treated in a case sensitive way.
+  template<class T>
+  static int InsertCell(const std::string& table_name,
+                        const std::string& column_name,
+                        T value) {
+    DataLogImpl* data_log = DataLogImpl::StaticInstance();
+    if (data_log == NULL)
+      return -1;
+    return data_log->InsertCell(
+             table_name,
+             column_name,
+             new ValueContainer<T>(value));
+  }
+
+  // Inserts an array of values into a table with name table_name at the
+  // column specified by column_name, which must be a multi-value-column.
+  // Note that the MultiValueContainer makes use of the copy constructor,
+  // operator= and operator<< of the type T, and that the template type
+  // must implement a deep-copying copy constructor and operator=.
+  // Copy constructor and operator= must not be disabled for the type T.
+  // table_name and column_name are treated in a case sensitive way.
+  template<class T>
+  static int InsertCell(const std::string& table_name,
+                        const std::string& column_name,
+                        const T* array,
+                        int length) {
+    DataLogImpl* data_log = DataLogImpl::StaticInstance();
+    if (data_log == NULL)
+      return -1;
+    return data_log->InsertCell(
+             table_name,
+             column_name,
+             new MultiValueContainer<T>(array, length));
+  }
+
+  // For the table with name table_name: Writes the current row to file.
+  // Starts a new empty row.
+  // table_name is treated in a case-sensitive way.
+  static int NextRow(const std::string& table_name);
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_H_
diff --git a/src/system_wrappers/interface/data_log_c.h b/src/system_wrappers/interface/data_log_c.h
new file mode 100644
index 0000000..fffbb4f
--- /dev/null
+++ b/src/system_wrappers/interface/data_log_c.h
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This is a pure C wrapper of the DataLog class. The functions are directly
+ * mapped here except for InsertCell as C does not support templates.
+ * See data_log.h for a description of the functions.
+ */
+
+#ifndef SRC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_C_H_
+#define SRC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_C_H_
+
+#include <stddef.h>  /* size_t */
+
+#include "typedefs.h"  /* NOLINT(build/include) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * All char* parameters in this file are expected to be null-terminated
+ * character sequences.
+ */
+int WebRtcDataLog_CreateLog();
+void WebRtcDataLog_ReturnLog();
+char* WebRtcDataLog_Combine(char* combined_name, size_t combined_len,
+                            const char* table_name, int table_id);
+int WebRtcDataLog_AddTable(const char* table_name);
+int WebRtcDataLog_AddColumn(const char* table_name, const char* column_name,
+                            int multi_value_length);
+
+int WebRtcDataLog_InsertCell_int(const char* table_name,
+                                 const char* column_name,
+                                 int value);
+int WebRtcDataLog_InsertArray_int(const char* table_name,
+                                  const char* column_name,
+                                  const int* values,
+                                  int length);
+int WebRtcDataLog_InsertCell_float(const char* table_name,
+                                   const char* column_name,
+                                   float value);
+int WebRtcDataLog_InsertArray_float(const char* table_name,
+                                    const char* column_name,
+                                    const float* values,
+                                    int length);
+int WebRtcDataLog_InsertCell_double(const char* table_name,
+                                    const char* column_name,
+                                    double value);
+int WebRtcDataLog_InsertArray_double(const char* table_name,
+                                     const char* column_name,
+                                     const double* values,
+                                     int length);
+int WebRtcDataLog_InsertCell_int32(const char* table_name,
+                                   const char* column_name,
+                                   int32_t value);
+int WebRtcDataLog_InsertArray_int32(const char* table_name,
+                                    const char* column_name,
+                                    const int32_t* values,
+                                    int length);
+int WebRtcDataLog_InsertCell_uint32(const char* table_name,
+                                    const char* column_name,
+                                    uint32_t value);
+int WebRtcDataLog_InsertArray_uint32(const char* table_name,
+                                     const char* column_name,
+                                     const uint32_t* values,
+                                     int length);
+int WebRtcDataLog_InsertCell_int64(const char* table_name,
+                                   const char* column_name,
+                                   int64_t value);
+int WebRtcDataLog_InsertArray_int64(const char* table_name,
+                                    const char* column_name,
+                                    const int64_t* values,
+                                    int length);
+
+int WebRtcDataLog_NextRow(const char* table_name);
+
+#ifdef __cplusplus
+}  /* end of extern "C" */
+#endif
+
+#endif  /* SRC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_C_H_ */ /* NOLINT */
diff --git a/src/system_wrappers/interface/data_log_impl.h b/src/system_wrappers/interface/data_log_impl.h
new file mode 100644
index 0000000..cef4964
--- /dev/null
+++ b/src/system_wrappers/interface/data_log_impl.h
@@ -0,0 +1,157 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the helper classes for the DataLog APIs. See data_log.h
+ * for the APIs.
+ *
+ * These classes are helper classes used for logging data for offline
+ * processing. Data logged with these classes can conveniently be parsed and
+ * processed with e.g. Matlab.
+ */
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_IMPL_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_IMPL_H_
+
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "scoped_ptr.h"
+#include "typedefs.h"
+
+namespace webrtc {
+
+class CriticalSectionWrapper;
+class EventWrapper;
+class LogTable;
+class RWLockWrapper;
+class ThreadWrapper;
+
+// All container classes need to implement a ToString-function to be
+// writable to file. Enforce this via the Container interface.
+class Container {
+ public:
+  virtual ~Container() {}
+
+  virtual void ToString(std::string* container_string) const = 0;
+};
+
+template<class T>
+class ValueContainer : public Container {
+ public:
+  explicit ValueContainer(T data) : data_(data) {}
+
+  virtual void ToString(std::string* container_string) const {
+    *container_string = "";
+    std::stringstream ss;
+    ss << data_ << ",";
+    ss >> *container_string;
+  }
+
+ private:
+  T   data_;
+};
+
+template<class T>
+class MultiValueContainer : public Container {
+ public:
+  MultiValueContainer(const T* data, int length)
+    : data_(data, data + length) {
+  }
+
+  virtual void ToString(std::string* container_string) const {
+    *container_string = "";
+    std::stringstream ss;
+    for (size_t i = 0; i < data_.size(); ++i)
+      ss << data_[i] << ",";
+    *container_string += ss.str();
+  }
+
+ private:
+  std::vector<T>  data_;
+};
+
+class DataLogImpl {
+ public:
+  ~DataLogImpl();
+
+  // The implementation of the CreateLog() method declared in data_log.h.
+  // See data_log.h for a description.
+  static int CreateLog();
+
+  // The implementation of the StaticInstance() method declared in data_log.h.
+  // See data_log.h for a description.
+  static DataLogImpl* StaticInstance();
+
+  // The implementation of the ReturnLog() method declared in data_log.h. See
+  // data_log.h for a description.
+  static void ReturnLog();
+
+  // The implementation of the AddTable() method declared in data_log.h. See
+  // data_log.h for a description.
+  int AddTable(const std::string& table_name);
+
+  // The implementation of the AddColumn() method declared in data_log.h. See
+  // data_log.h for a description.
+  int AddColumn(const std::string& table_name,
+                const std::string& column_name,
+                int multi_value_length);
+
+  // Inserts a Container into a table with name table_name at the column
+  // with name column_name.
+  // column_name is treated in a case sensitive way.
+  int InsertCell(const std::string& table_name,
+                 const std::string& column_name,
+                 const Container* value_container);
+
+  // The implementation of the NextRow() method declared in data_log.h. See
+  // data_log.h for a description.
+  int NextRow(const std::string& table_name);
+
+ private:
+  DataLogImpl();
+
+  // Initializes the DataLogImpl object, allocates and starts the
+  // thread file_writer_thread_.
+  int Init();
+
+  // Write all complete rows in every table to file.
+  // This function should only be called by the file_writer_thread_ if that
+  // thread is running to avoid race conditions.
+  void Flush();
+
+  // Run() is called by the thread file_writer_thread_.
+  static bool Run(void* obj);
+
+  // This function writes data to file. Note, it blocks if there is no data
+  // that should be written to file available. Flush is the non-blocking
+  // version of this function.
+  void Process();
+
+  // Stops the continuous calling of Process().
+  void StopThread();
+
+  // Collection of tables indexed by the table name as std::string.
+  typedef std::map<std::string, LogTable*> TableMap;
+  typedef webrtc::scoped_ptr<CriticalSectionWrapper> CritSectScopedPtr;
+
+  static CritSectScopedPtr  crit_sect_;
+  static DataLogImpl*       instance_;
+  int                       counter_;
+  TableMap                  tables_;
+  EventWrapper*             flush_event_;
+  ThreadWrapper*            file_writer_thread_;
+  RWLockWrapper*            tables_lock_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_DATA_LOG_IMPL_H_
diff --git a/src/system_wrappers/interface/file_wrapper.h b/src/system_wrappers/interface/file_wrapper.h
index 8f0cd8c..4d17438 100644
--- a/src/system_wrappers/interface/file_wrapper.h
+++ b/src/system_wrappers/interface/file_wrapper.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_FILE_WRAPPER_H_
 #define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_FILE_WRAPPER_H_
 
+#include <stddef.h>
+
 #include "common_types.h"
 #include "typedefs.h"
 
@@ -18,11 +20,11 @@
 // write from/to a file.
 
 namespace webrtc {
+
 class FileWrapper : public InStream, public OutStream
 {
 public:
-    enum { kMaxFileNameSize = 1024};
-    enum { kFileMaxTextMessageSize = 1024};
+    static const size_t kMaxFileNameSize = 1024;
 
     // Factory method. Constructor disabled.
     static FileWrapper* Create();
@@ -31,42 +33,47 @@
     virtual bool Open() const = 0;
 
     // Opens a file in read or write mode, decided by the readOnly parameter.
-    virtual WebRtc_Word32 OpenFile(const WebRtc_Word8* fileNameUTF8,
-                                   const bool readOnly,
-                                   const bool loop = false,
-                                   const bool text = false) = 0;
+    virtual int OpenFile(const char* fileNameUTF8,
+                         bool readOnly,
+                         bool loop = false,
+                         bool text = false) = 0;
 
-    virtual WebRtc_Word32 CloseFile() = 0;
+    virtual int CloseFile() = 0;
 
-    // Limits the file size.
-    virtual WebRtc_Word32 SetMaxFileSize(WebRtc_Word32 bytes)  = 0;
+    // Limits the file size to |bytes|. Writing will fail after the cap
+    // is hit. Pass zero to use an unlimited size.
+    virtual int SetMaxFileSize(size_t bytes)  = 0;
 
     // Flush any pending writes.
-    virtual WebRtc_Word32 Flush() = 0;
+    virtual int Flush() = 0;
 
-    // Returns the opened file's name in fileNameUTF8. size is the allocated
-    // size of fileNameUTF8. The name will be truncated if the size of
-    // fileNameUTF8 is to small.
-    virtual WebRtc_Word32 FileName(WebRtc_Word8* fileNameUTF8,
-                                   WebRtc_UWord32 size) const = 0;
+    // Returns the opened file's name in |fileNameUTF8|. Provide the size of
+    // the buffer in bytes in |size|. The name will be truncated if |size| is
+    // too small.
+    virtual int FileName(char* fileNameUTF8,
+                         size_t size) const = 0;
 
-    // Write text to the opened file. The written text can contain plain text
-    // and text with type specifiers in the same way as sprintf works.
-    virtual WebRtc_Word32 WriteText(const WebRtc_Word8* text, ...) = 0;
+    // Write |format| to the opened file. Arguments are taken in the same manner
+    // as printf. That is, supply a format string containing text and
+    // specifiers. Returns the number of characters written or -1 on error.
+    virtual int WriteText(const char* format, ...) = 0;
 
-    // Reads len number of bytes from buf to file.
-    virtual int Read(void* buf, int len) = 0;
+    // Inherited from InStream.
+    // Reads |length| bytes from file to |buf|. Returns the number of bytes read
+    // or -1 on error.
+    virtual int Read(void* buf, int length) = 0;
 
-    // Writes len number of bytes to buf from file. Please note that the actual
-    // writing to file may happen some time later. Call flush to force a write
-    // to take affect
-    virtual bool Write(const void *buf,int len) = 0;
+    // Inherited from OutStream.
+    // Writes |length| bytes from |buf| to file. The actual writing may happen
+    // some time later. Call Flush() to force a write.
+    virtual bool Write(const void *buf, int length) = 0;
 
+    // Inherited from both InStream and OutStream.
     // Rewinds the file to the start. Only available when OpenFile() has been
-    // called with loop argument set to true. Or readOnly argument has been set
-    // to false.
+    // called with |loop| == true or |readOnly| == true.
     virtual int Rewind() = 0;
 };
+
 } // namespace webrtc
 
 #endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_FILE_WRAPPER_H_
diff --git a/src/system_wrappers/interface/list_wrapper.h b/src/system_wrappers/interface/list_wrapper.h
index bc10ad4..3608ada 100644
--- a/src/system_wrappers/interface/list_wrapper.h
+++ b/src/system_wrappers/interface/list_wrapper.h
@@ -34,7 +34,6 @@
 private:
     const void*         item_ptr_;
     const unsigned int  item_;
-    DISALLOW_COPY_AND_ASSIGN(ListItem);
 };
 
 class ListWrapper
@@ -102,7 +101,6 @@
     ListItem* first_;
     ListItem* last_;
     unsigned int size_;
-    DISALLOW_COPY_AND_ASSIGN(ListWrapper);
 };
 } //namespace webrtc
 
diff --git a/src/system_wrappers/interface/map_wrapper.h b/src/system_wrappers/interface/map_wrapper.h
index 9297382..7d4e733 100644
--- a/src/system_wrappers/interface/map_wrapper.h
+++ b/src/system_wrappers/interface/map_wrapper.h
@@ -31,7 +31,6 @@
 private:
     int   item_id_;
     void* item_pointer_;
-    DISALLOW_COPY_AND_ASSIGN(MapItem);
 };
 
 class MapWrapper
@@ -70,7 +69,6 @@
 
 private:
     std::map<int, MapItem*>    map_;
-    DISALLOW_COPY_AND_ASSIGN(MapWrapper);
 };
 } // namespace webrtc
 
diff --git a/src/system_wrappers/interface/ref_count.h b/src/system_wrappers/interface/ref_count.h
new file mode 100644
index 0000000..f90b0b3
--- /dev/null
+++ b/src/system_wrappers/interface/ref_count.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INTERFACE_REF_COUNT_H_
+#define SYSTEM_WRAPPERS_INTERFACE_REF_COUNT_H_
+
+#include "system_wrappers/interface/atomic32_wrapper.h"
+
+namespace webrtc {
+
+// This class can be used for instantiating
+// reference counted objects.
+// int32_t AddRef() and int32_t Release().
+// Usage:
+// RefCountImpl<T>* implementation = new RefCountImpl<T>(p);
+//
+// Example:
+// class MyInterface {
+//  public:
+//   virtual void DoSomething() = 0;
+//   virtual int32_t AddRef() = 0;
+//   virtual int32_t Release() = 0;
+//  private:
+//   virtual ~MyInterface(){};
+// }
+// class MyImplementation : public MyInterface {
+//  public:
+//   virtual void DoSomething() { printf("hello"); }
+// };
+// MyImplementation* CreateMyImplementation() {
+//   RefCountImpl<MyImplementation>* implementation =
+//       new RefCountImpl<MyImplementation>();
+//   return implementation;
+// }
+
+template <class T>
+class RefCountImpl : public T {
+ public:
+  RefCountImpl() : ref_count_(0) {}
+
+  template<typename P>
+  explicit RefCountImpl(P p) : T(p), ref_count_(0) {}
+
+  template<typename P1, typename P2>
+  RefCountImpl(P1 p1, P2 p2) : T(p1, p2), ref_count_(0) {}
+
+  template<typename P1, typename P2, typename P3>
+  RefCountImpl(P1 p1, P2 p2, P3 p3) : T(p1, p2, p3), ref_count_(0) {}
+
+  template<typename P1, typename P2, typename P3, typename P4>
+  RefCountImpl(P1 p1, P2 p2, P3 p3, P4 p4) : T(p1, p2, p3, p4), ref_count_(0) {}
+
+  template<typename P1, typename P2, typename P3, typename P4, typename P5>
+  RefCountImpl(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)
+      : T(p1, p2, p3, p4, p5), ref_count_(0) {}
+
+  virtual int32_t AddRef() {
+    return ++ref_count_;
+  }
+
+  virtual int32_t Release() {
+    int32_t ref_count;
+    ref_count = --ref_count_;
+    if (ref_count == 0)
+      delete this;
+    return ref_count;
+  }
+
+ protected:
+  Atomic32Wrapper ref_count_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INTERFACE_REF_COUNT_H_
diff --git a/src/system_wrappers/interface/scoped_ptr.h b/src/system_wrappers/interface/scoped_ptr.h
new file mode 100644
index 0000000..74b6ad3
--- /dev/null
+++ b/src/system_wrappers/interface/scoped_ptr.h
@@ -0,0 +1,258 @@
+//  (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
+//  Copyright (c) 2001, 2002 Peter Dimov
+//
+//  Permission to copy, use, modify, sell and distribute this software
+//  is granted provided this copyright notice appears in all copies.
+//  This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+//
+//  See http://www.boost.org/libs/smart_ptr/scoped_ptr.htm for documentation.
+//
+
+//  scoped_ptr mimics a built-in pointer except that it guarantees deletion
+//  of the object pointed to, either on destruction of the scoped_ptr or via
+//  an explicit reset(). scoped_ptr is a simple solution for simple needs;
+//  use shared_ptr or std::auto_ptr if your needs are more complex.
+
+//  scoped_ptr_malloc added in by Google.  When one of
+//  these goes out of scope, instead of doing a delete or delete[], it
+//  calls free().  scoped_ptr_malloc<char> is likely to see much more
+//  use than any other specializations.
+
+//  release() added in by Google. Use this to conditionally
+//  transfer ownership of a heap-allocated object to the caller, usually on
+//  method success.
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
+
+#include <assert.h>            // for assert
+#include <stdlib.h>            // for free() decl
+
+#include <cstddef>             // for std::ptrdiff_t
+
+#ifdef _WIN32
+namespace std { using ::ptrdiff_t; };
+#endif // _WIN32
+
+namespace webrtc {
+
+template <typename T>
+class scoped_ptr {
+ private:
+
+  T* ptr;
+
+  scoped_ptr(scoped_ptr const &);
+  scoped_ptr & operator=(scoped_ptr const &);
+
+ public:
+
+  typedef T element_type;
+
+  explicit scoped_ptr(T* p = NULL): ptr(p) {}
+
+  ~scoped_ptr() {
+    typedef char type_must_be_complete[sizeof(T)];
+    delete ptr;
+  }
+
+  void reset(T* p = NULL) {
+    typedef char type_must_be_complete[sizeof(T)];
+
+    if (ptr != p) {
+      T* obj = ptr;
+      ptr = p;
+      // Delete last, in case obj destructor indirectly results in ~scoped_ptr
+      delete obj;
+    }
+  }
+
+  T& operator*() const {
+    assert(ptr != NULL);
+    return *ptr;
+  }
+
+  T* operator->() const  {
+    assert(ptr != NULL);
+    return ptr;
+  }
+
+  T* get() const  {
+    return ptr;
+  }
+
+  void swap(scoped_ptr & b) {
+    T* tmp = b.ptr;
+    b.ptr = ptr;
+    ptr = tmp;
+  }
+
+  T* release() {
+    T* tmp = ptr;
+    ptr = NULL;
+    return tmp;
+  }
+
+  T** accept() {
+    if (ptr) {
+      delete ptr;
+      ptr = NULL;
+    }
+    return &ptr;
+  }
+
+  T** use() {
+    return &ptr;
+  }
+};
+
+template<typename T> inline
+void swap(scoped_ptr<T>& a, scoped_ptr<T>& b) {
+  a.swap(b);
+}
+
+
+
+
+//  scoped_array extends scoped_ptr to arrays. Deletion of the array pointed to
+//  is guaranteed, either on destruction of the scoped_array or via an explicit
+//  reset(). Use shared_array or std::vector if your needs are more complex.
+
+template<typename T>
+class scoped_array {
+ private:
+
+  T* ptr;
+
+  scoped_array(scoped_array const &);
+  scoped_array & operator=(scoped_array const &);
+
+ public:
+
+  typedef T element_type;
+
+  explicit scoped_array(T* p = NULL) : ptr(p) {}
+
+  ~scoped_array() {
+    typedef char type_must_be_complete[sizeof(T)];
+    delete[] ptr;
+  }
+
+  void reset(T* p = NULL) {
+    typedef char type_must_be_complete[sizeof(T)];
+
+    if (ptr != p) {
+      T* arr = ptr;
+      ptr = p;
+      // Delete last, in case arr destructor indirectly results in ~scoped_array
+      delete [] arr;
+    }
+  }
+
+  T& operator[](std::ptrdiff_t i) const {
+    assert(ptr != NULL);
+    assert(i >= 0);
+    return ptr[i];
+  }
+
+  T* get() const {
+    return ptr;
+  }
+
+  void swap(scoped_array & b) {
+    T* tmp = b.ptr;
+    b.ptr = ptr;
+    ptr = tmp;
+  }
+
+  T* release() {
+    T* tmp = ptr;
+    ptr = NULL;
+    return tmp;
+  }
+
+  T** accept() {
+    if (ptr) {
+      delete [] ptr;
+      ptr = NULL;
+    }
+    return &ptr;
+  }
+};
+
+template<class T> inline
+void swap(scoped_array<T>& a, scoped_array<T>& b) {
+  a.swap(b);
+}
+
+// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a
+// second template argument, the function used to free the object.
+
+template<typename T, void (*FF)(void*) = free> class scoped_ptr_malloc {
+ private:
+
+  T* ptr;
+
+  scoped_ptr_malloc(scoped_ptr_malloc const &);
+  scoped_ptr_malloc & operator=(scoped_ptr_malloc const &);
+
+ public:
+
+  typedef T element_type;
+
+  explicit scoped_ptr_malloc(T* p = 0): ptr(p) {}
+
+  ~scoped_ptr_malloc() {
+    FF(static_cast<void*>(ptr));
+  }
+
+  void reset(T* p = 0) {
+    if (ptr != p) {
+      FF(static_cast<void*>(ptr));
+      ptr = p;
+    }
+  }
+
+  T& operator*() const {
+    assert(ptr != 0);
+    return *ptr;
+  }
+
+  T* operator->() const {
+    assert(ptr != 0);
+    return ptr;
+  }
+
+  T* get() const {
+    return ptr;
+  }
+
+  void swap(scoped_ptr_malloc & b) {
+    T* tmp = b.ptr;
+    b.ptr = ptr;
+    ptr = tmp;
+  }
+
+  T* release() {
+    T* tmp = ptr;
+    ptr = 0;
+    return tmp;
+  }
+
+  T** accept() {
+    if (ptr) {
+      FF(static_cast<void*>(ptr));
+      ptr = 0;
+    }
+    return &ptr;
+  }
+};
+
+template<typename T, void (*FF)(void*)> inline
+void swap(scoped_ptr_malloc<T,FF>& a, scoped_ptr_malloc<T,FF>& b) {
+  a.swap(b);
+}
+
+} // namespace webrtc
+
+#endif  // #ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
diff --git a/src/system_wrappers/interface/scoped_refptr.h b/src/system_wrappers/interface/scoped_refptr.h
new file mode 100644
index 0000000..0df15be
--- /dev/null
+++ b/src/system_wrappers/interface/scoped_refptr.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file under third_party_mods/chromium or at:
+// http://src.chromium.org/svn/trunk/src/LICENSE
+
+#ifndef SYSTEM_WRAPPERS_INTERFACE_SCOPED_REFPTR_H_
+#define SYSTEM_WRAPPERS_INTERFACE_SCOPED_REFPTR_H_
+
+namespace webrtc {
+
+// Extracted from Chromium's src/base/memory/ref_counted.h.
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     ...
+//     foo = NULL;  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references NULL.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+template <class T>
+class scoped_refptr {
+ public:
+  scoped_refptr() : ptr_(NULL) {
+  }
+
+  // Takes a new reference on |p| (calls p->AddRef()) if it is non-NULL.
+  scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  // Converting copy constructor, e.g. scoped_refptr<Base> from
+  // scoped_refptr<Derived>.
+  template <typename U>
+  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  ~scoped_refptr() {
+    if (ptr_)
+      ptr_->Release();
+  }
+
+  T* get() const { return ptr_; }
+  operator T*() const { return ptr_; }
+  T* operator->() const { return ptr_; }
+
+  // Release a pointer.
+  // The return value is the current pointer held by this object.
+  // If this object holds a NULL pointer, the return value is NULL.
+  // After this operation, this object will hold a NULL pointer,
+  // and will not own the object any more.
+  // NOTE: the reference count is NOT decremented here; the caller becomes
+  // responsible for eventually calling Release() on the returned pointer.
+  T* release() {
+    T* retVal = ptr_;
+    ptr_ = NULL;
+    return retVal;
+  }
+
+  scoped_refptr<T>& operator=(T* p) {
+    // AddRef first so that self assignment should work
+    if (p)
+      p->AddRef();
+    if (ptr_ )
+      ptr_->Release();
+    ptr_ = p;
+    return *this;
+  }
+
+  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+    return *this = r.ptr_;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+    return *this = r.get();
+  }
+
+  // Exchanges the held pointer with *pp without touching reference counts.
+  void swap(T** pp) {
+    T* p = ptr_;
+    ptr_ = *pp;
+    *pp = p;
+  }
+
+  void swap(scoped_refptr<T>& r) {
+    swap(&r.ptr_);
+  }
+
+ protected:
+  T* ptr_;  // Owned (ref-counted) pointer; may be NULL.
+};
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INTERFACE_SCOPED_REFPTR_H_
diff --git a/src/system_wrappers/interface/static_instance.h b/src/system_wrappers/interface/static_instance.h
new file mode 100644
index 0000000..8fe91cc
--- /dev/null
+++ b/src/system_wrappers/interface/static_instance.h
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_STATICINSTANCETEMPLATE_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_STATICINSTANCETEMPLATE_H_
+
+#include <assert.h>
+
+#include "critical_section_wrapper.h"
+#ifdef _WIN32
+#include "fix_interlocked_exchange_pointer_win.h"
+#endif
+
+namespace webrtc {
+
+enum CountOperation {
+  kRelease,
+  kAddRef,
+  kAddRefNoCreate
+};
+enum CreateOperation {
+  kInstanceExists,
+  kCreate,
+  kDestroy
+};
+
+template <class T>
+// Construct On First Use idiom. Avoids
+// "static initialization order fiasco".
+// Reference-counted access to a lazily created singleton of type T, which
+// must provide a static T* CreateInstance().
+//  - kAddRef:         increments the count, creating the instance if needed.
+//  - kAddRefNoCreate: increments the count only if an instance already
+//                     exists; returns NULL otherwise.
+//  - kRelease:        decrements the count; destroys the instance (and
+//                     returns NULL) when the count reaches zero.
+static T* GetStaticInstance(CountOperation count_operation) {
+  // TODO (hellner): use atomic wrapper instead.
+  static volatile long instance_count = 0;
+  static T* volatile instance = NULL;
+  CreateOperation state = kInstanceExists;
+#ifndef _WIN32
+  // Non-Windows path: all bookkeeping is done under one critical section.
+  // This memory is staticly allocated once. The application does not try to
+  // free this memory. This approach is taken to avoid issues with
+  // destruction order for statically allocated memory. The memory will be
+  // reclaimed by the OS and memory leak tools will not recognize memory
+  // reachable from statics leaked so no noise is added by doing this.
+  static CriticalSectionWrapper* crit_sect(
+      CriticalSectionWrapper::CreateCriticalSection());
+  CriticalSectionScoped lock(crit_sect);
+
+  if (count_operation ==
+      kAddRefNoCreate && instance_count == 0) {
+    return NULL;
+  }
+  if (count_operation ==
+      kAddRef ||
+      count_operation == kAddRefNoCreate) {
+    instance_count++;
+    if (instance_count == 1) {
+      state = kCreate;
+    }
+  } else {
+    instance_count--;
+    if (instance_count == 0) {
+      state = kDestroy;
+    }
+  }
+  if (state == kCreate) {
+    instance = T::CreateInstance();
+  } else if (state == kDestroy) {
+    T* old_instance = instance;
+    instance = NULL;
+    // The state will not change past this point. Release the critical
+    // section while deleting the object in case it would be blocking on
+    // access back to this object. (This is the case for the tracing class
+    // since the thread owned by the tracing class also traces).
+    // TODO(hellner): this is a bit out of place but here goes, de-couple
+    // thread implementation with trace implementation.
+    crit_sect->Leave();
+    if (old_instance) {
+      delete old_instance;
+    }
+    // Re-acquire the lock since the scoped critical section will release
+    // it.
+    crit_sect->Enter();
+    return NULL;
+  }
+#else  // _WIN32
+  // Windows path: lock-free bookkeeping via Interlocked* primitives.
+  if (count_operation ==
+      kAddRefNoCreate && instance_count == 0) {
+    return NULL;
+  }
+  if (count_operation == kAddRefNoCreate) {
+    if (1 == InterlockedIncrement(&instance_count)) {
+      // The instance has been destroyed by some other thread. Rollback.
+      InterlockedDecrement(&instance_count);
+      assert(false);
+      return NULL;
+    }
+    // Sanity to catch corrupt state.
+    if (instance == NULL) {
+      assert(false);
+      InterlockedDecrement(&instance_count);
+      return NULL;
+    }
+  } else if (count_operation == kAddRef) {
+    if (instance_count == 0) {
+      state = kCreate;
+    } else {
+      if (1 == InterlockedIncrement(&instance_count)) {
+        // InterlockedDecrement because reference count should not be
+        // updated just yet (that's done when the instance is created).
+        InterlockedDecrement(&instance_count);
+        state = kCreate;
+      }
+    }
+  } else {
+    int newValue = InterlockedDecrement(&instance_count);
+    if (newValue == 0) {
+      state = kDestroy;
+    }
+  }
+
+  if (state == kCreate) {
+    // Create instance and let whichever thread finishes first assign its
+    // local copy to the global instance. All other threads reclaim their
+    // local copy.
+    T* new_instance = T::CreateInstance();
+    if (1 == InterlockedIncrement(&instance_count)) {
+      T* old_value = static_cast<T*> (InterlockedExchangePointer(
+          reinterpret_cast<void* volatile*>(&instance), new_instance));
+      assert(old_value == NULL);
+      assert(instance);
+    } else {
+      InterlockedDecrement(&instance_count);
+      if (new_instance) {
+        delete static_cast<T*>(new_instance);
+      }
+    }
+  } else if (state == kDestroy) {
+    T* old_value = static_cast<T*> (InterlockedExchangePointer(
+        reinterpret_cast<void* volatile*>(&instance), NULL));
+    if (old_value) {
+      delete static_cast<T*>(old_value);
+    }
+    return NULL;
+  }
+#endif  // #ifndef _WIN32
+  return instance;
+}
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_STATICINSTANCETEMPLATE_H_
diff --git a/src/system_wrappers/interface/thread_wrapper.h b/src/system_wrappers/interface/thread_wrapper.h
index eccf3c2..72a06e8 100644
--- a/src/system_wrappers/interface/thread_wrapper.h
+++ b/src/system_wrappers/interface/thread_wrapper.h
@@ -21,7 +21,9 @@
 // function.
 #define ThreadObj void*
 
-// Callback function that the spawned thread will enter once spawned
+// Callback function that the spawned thread will enter once spawned.
+// A return value of false is interpreted as that the function has no
+// more work to do and that the thread can be released.
 typedef  bool(*ThreadRunFunction)(ThreadObj);
 
 enum ThreadPriority
diff --git a/src/system_wrappers/interface/tick_util.h b/src/system_wrappers/interface/tick_util.h
index 4c28067..e78e53d 100644
--- a/src/system_wrappers/interface/tick_util.h
+++ b/src/system_wrappers/interface/tick_util.h
@@ -76,6 +76,11 @@
                                   const TickInterval& rhs);
     TickInterval& operator+=(const TickInterval& rhs);
 
+    friend bool operator>(const TickInterval& lhs, const TickInterval& rhs);
+    friend bool operator<=(const TickInterval& lhs, const TickInterval& rhs);
+    friend bool operator<(const TickInterval& lhs, const TickInterval& rhs);
+    friend bool operator>=(const TickInterval& lhs, const TickInterval& rhs);
+
 private:
     TickInterval(WebRtc_Word64 interval);
 
@@ -107,6 +112,22 @@
     time._ticks += ticks;
     return time;
 }
+inline bool operator>(const TickInterval& lhs, const TickInterval& rhs)
+{
+    return lhs._interval > rhs._interval;
+}
+inline bool operator<=(const TickInterval& lhs, const TickInterval& rhs)
+{
+    return lhs._interval <= rhs._interval;
+}
+inline bool operator<(const TickInterval& lhs, const TickInterval& rhs)
+{
+    // Strict less-than must use '<'; the previous '<=' made operator< and
+    // operator<= identical and broke a < b / a >= b complementarity.
+    return lhs._interval < rhs._interval;
+}
+inline bool operator>=(const TickInterval& lhs, const TickInterval& rhs)
+{
+    return lhs._interval >= rhs._interval;
+}
 
 inline TickTime TickTime::Now()
 {
diff --git a/src/system_wrappers/interface/trace.h b/src/system_wrappers/interface/trace.h
index 0f7df4d..8330f7c 100644
--- a/src/system_wrappers/interface/trace.h
+++ b/src/system_wrappers/interface/trace.h
@@ -18,14 +18,7 @@
 #include "common_types.h"
 #include "typedefs.h"
 
-#ifdef WEBRTC_NO_TRACE
-    #define WEBRTC_TRACE
-#else
-    // Ideally we would use __VA_ARGS__ but it's not supported by all compilers
-    // such as VS2003 (it's supported in VS2005). TODO (hellner) why
-    // would this be better than current implementation (not convinced)?
-    #define WEBRTC_TRACE Trace::Add
-#endif
+#define WEBRTC_TRACE Trace::Add
 
 namespace webrtc {
 class Trace
diff --git a/src/system_wrappers/source/Android.mk b/src/system_wrappers/source/Android.mk
index f8e406f..575580a 100644
--- a/src/system_wrappers/source/Android.mk
+++ b/src/system_wrappers/source/Android.mk
@@ -1,14 +1,21 @@
-# This file is generated by gyp; do not edit. This means you!
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
 
 LOCAL_PATH := $(call my-dir)
 
 include $(CLEAR_VARS)
 
+include $(LOCAL_PATH)/../../../android-webrtc.mk
+
 LOCAL_ARM_MODE := arm
 LOCAL_MODULE := libwebrtc_system_wrappers
 LOCAL_MODULE_TAGS := optional
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_GENERATED_SOURCES :=
 LOCAL_SRC_FILES := \
     map.cc \
     rw_lock_generic.cc \
@@ -16,8 +23,10 @@
     aligned_malloc.cc \
     atomic32.cc \
     condition_variable.cc \
-    cpu.cc \
+    cpu_no_op.cc \
     cpu_features.cc \
+    cpu_features_arm.c \
+    cpu_info.cc \
     critical_section.cc \
     event.cc \
     file_impl.cc \
@@ -25,43 +34,28 @@
     rw_lock.cc \
     thread.cc \
     trace_impl.cc \
-    condition_variable_linux.cc \
+    condition_variable_posix.cc \
     cpu_linux.cc \
-    critical_section_linux.cc \
-    event_linux.cc \
-    thread_linux.cc \
-    trace_linux.cc \
-    rw_lock_linux.cc 
+    critical_section_posix.cc \
+    event_posix.cc \
+    thread_posix.cc \
+    trace_posix.cc \
+    rw_lock_posix.cc 
 
-# Flags passed to both C and C++ files.
-MY_CFLAGS :=  
-MY_CFLAGS_C :=
-MY_DEFS := '-DNO_TCMALLOC' \
-    '-DNO_HEAPCHECKER' \
-    '-DWEBRTC_TARGET_PC' \
-    '-DWEBRTC_LINUX' \
-    '-DWEBRTC_CLOCK_TYPE_REALTIME' \
-    '-DWEBRTC_THREAD_RR' \
-    '-DWEBRTC_ANDROID' \
-    '-DANDROID' 
-LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS)
 
-# Include paths placed before CFLAGS/CPPFLAGS
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/../.. \
-    $(LOCAL_PATH)/spreadsortlib \
-    $(LOCAL_PATH)/../interface 
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/../.. \
+    $(LOCAL_PATH)/../interface \
+    $(LOCAL_PATH)/spreadsortlib
 
-# Flags passed to only C++ (and not C) files.
-LOCAL_CPPFLAGS := 
-
-LOCAL_LDFLAGS :=
-
-LOCAL_STATIC_LIBRARIES :=
-
-LOCAL_SHARED_LIBRARIES := libcutils \
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
     libdl \
     libstlport
-LOCAL_ADDITIONAL_DEPENDENCIES :=
 
+ifndef NDK_ROOT
 include external/stlport/libstlport.mk
+endif
 include $(BUILD_STATIC_LIBRARY)
diff --git a/src/system_wrappers/source/aligned_malloc.cc b/src/system_wrappers/source/aligned_malloc.cc
index 6225753..bb10c6b 100644
--- a/src/system_wrappers/source/aligned_malloc.cc
+++ b/src/system_wrappers/source/aligned_malloc.cc
@@ -13,7 +13,7 @@
 #include <assert.h>
 #include <memory.h>
 
-#ifdef ANDROID
+#ifdef WEBRTC_ANDROID
 #include <stdlib.h>
 #endif
 
diff --git a/src/system_wrappers/source/atomic32.cc b/src/system_wrappers/source/atomic32.cc
index 3d6849e..588dd3e 100644
--- a/src/system_wrappers/source/atomic32.cc
+++ b/src/system_wrappers/source/atomic32.cc
@@ -11,7 +11,7 @@
 #include "atomic32_wrapper.h"
 
 #if defined(_WIN32)
-    #include "atomic32_windows.h"
+    #include "atomic32_win.h"
 #elif defined(WEBRTC_LINUX)
     #include "atomic32_linux.h"
 #elif defined(WEBRTC_MAC)
diff --git a/src/system_wrappers/source/condition_variable.cc b/src/system_wrappers/source/condition_variable.cc
index 7ca1b56..b37d037 100644
--- a/src/system_wrappers/source/condition_variable.cc
+++ b/src/system_wrappers/source/condition_variable.cc
@@ -11,16 +11,16 @@
 #if defined(_WIN32)
    #include <windows.h>
    #include "condition_variable_wrapper.h"
-   #include "condition_variable_windows.h"
+   #include "condition_variable_win.h"
 #elif defined(WEBRTC_LINUX)
    #include <pthread.h>
    #include "condition_variable_wrapper.h"
-   #include "condition_variable_linux.h"
+   #include "condition_variable_posix.h"
 #elif defined(WEBRTC_MAC) || defined(WEBRTC_MAC_INTEL)
    #include <pthread.h>
    #include "condition_variable_wrapper.h"
-   #include "condition_variable_linux.h"
- #endif
+   #include "condition_variable_posix.h"
+#endif
 
 namespace webrtc {
 ConditionVariableWrapper*
@@ -29,7 +29,7 @@
 #if defined(_WIN32)
     return new ConditionVariableWindows;
 #elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) || defined(WEBRTC_MAC_INTEL)
-    return ConditionVariableLinux::Create();
+    return ConditionVariablePosix::Create();
 #else
     return NULL;
 #endif
diff --git a/src/system_wrappers/source/condition_variable_linux.cc b/src/system_wrappers/source/condition_variable_posix.cc
similarity index 81%
rename from src/system_wrappers/source/condition_variable_linux.cc
rename to src/system_wrappers/source/condition_variable_posix.cc
index 778c2cf..48835ab 100644
--- a/src/system_wrappers/source/condition_variable_linux.cc
+++ b/src/system_wrappers/source/condition_variable_posix.cc
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "condition_variable_linux.h"
+#include "condition_variable_posix.h"
 
 #if defined(WEBRTC_LINUX)
 #include <ctime>
@@ -18,12 +18,12 @@
 
 #include <errno.h>
 
-#include "critical_section_linux.h"
+#include "critical_section_posix.h"
 
 namespace webrtc {
-ConditionVariableWrapper* ConditionVariableLinux::Create()
+ConditionVariableWrapper* ConditionVariablePosix::Create()
 {
-    ConditionVariableLinux* ptr = new ConditionVariableLinux;
+    ConditionVariablePosix* ptr = new ConditionVariablePosix;
     if (!ptr)
     {
         return NULL;
@@ -39,11 +39,11 @@
     return ptr;
 }
 
-ConditionVariableLinux::ConditionVariableLinux()
+ConditionVariablePosix::ConditionVariablePosix()
 {
 }
 
-int ConditionVariableLinux::Construct()
+int ConditionVariablePosix::Construct()
 {
     int result = 0;
 #ifdef WEBRTC_CLOCK_TYPE_REALTIME
@@ -74,21 +74,21 @@
     return 0;
 }
 
-ConditionVariableLinux::~ConditionVariableLinux()
+ConditionVariablePosix::~ConditionVariablePosix()
 {
     pthread_cond_destroy(&_cond);
 }
 
-void ConditionVariableLinux::SleepCS(CriticalSectionWrapper& critSect)
+void ConditionVariablePosix::SleepCS(CriticalSectionWrapper& critSect)
 {
-    CriticalSectionLinux* cs = reinterpret_cast<CriticalSectionLinux*>(
+    CriticalSectionPosix* cs = reinterpret_cast<CriticalSectionPosix*>(
                                    &critSect);
     pthread_cond_wait(&_cond, &cs->_mutex);
 }
 
 
 bool
-ConditionVariableLinux::SleepCS(
+ConditionVariablePosix::SleepCS(
     CriticalSectionWrapper& critSect,
     unsigned long maxTimeInMS)
 {
@@ -101,7 +101,7 @@
     const int NANOSECONDS_PER_SECOND       = 1000000000;
     const int NANOSECONDS_PER_MILLISECOND  = 1000000;
 
-    CriticalSectionLinux* cs = reinterpret_cast<CriticalSectionLinux*>(
+    CriticalSectionPosix* cs = reinterpret_cast<CriticalSectionPosix*>(
                                    &critSect);
 
     if (maxTimeInMS != INFINITE)
@@ -139,12 +139,12 @@
     }
 }
 
-void ConditionVariableLinux::Wake()
+void ConditionVariablePosix::Wake()
 {
     pthread_cond_signal(&_cond);
 }
 
-void ConditionVariableLinux::WakeAll()
+void ConditionVariablePosix::WakeAll()
 {
     pthread_cond_broadcast(&_cond);
 }
diff --git a/src/system_wrappers/source/condition_variable_linux.h b/src/system_wrappers/source/condition_variable_posix.h
similarity index 72%
rename from src/system_wrappers/source/condition_variable_linux.h
rename to src/system_wrappers/source/condition_variable_posix.h
index 0300c5b..c239a47 100644
--- a/src/system_wrappers/source/condition_variable_linux.h
+++ b/src/system_wrappers/source/condition_variable_posix.h
@@ -8,19 +8,19 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_POSIX_H_
 
 #include "condition_variable_wrapper.h"
 
 #include <pthread.h>
 
 namespace webrtc {
-class ConditionVariableLinux : public ConditionVariableWrapper
+class ConditionVariablePosix : public ConditionVariableWrapper
 {
 public:
     static ConditionVariableWrapper* Create();
-    ~ConditionVariableLinux();
+    ~ConditionVariablePosix();
 
     void SleepCS(CriticalSectionWrapper& critSect);
     bool SleepCS(CriticalSectionWrapper& critSect, unsigned long maxTimeInMS);
@@ -28,7 +28,7 @@
     void WakeAll();
 
 private:
-    ConditionVariableLinux();
+    ConditionVariablePosix();
     int Construct();
 
 private:
@@ -36,4 +36,4 @@
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_CONDITION_VARIABLE_POSIX_H_
diff --git a/src/system_wrappers/source/cpu.cc b/src/system_wrappers/source/cpu.cc
index 2285872..3df5d18 100644
--- a/src/system_wrappers/source/cpu.cc
+++ b/src/system_wrappers/source/cpu.cc
@@ -11,74 +11,25 @@
 #include "cpu_wrapper.h"
 
 #if defined(_WIN32)
-    #include <Windows.h>
-    #include "engine_configurations.h"
-    #include "cpu_windows.h"
+    #include "cpu_win.h"
 #elif defined(WEBRTC_MAC)
-    #include <sys/types.h>
-    #include <sys/sysctl.h>
     #include "cpu_mac.h"
 #elif defined(WEBRTC_MAC_INTEL)
     #include "cpu_mac.h"
-#elif defined(ANDROID)
+#elif defined(WEBRTC_ANDROID)
     // Not implemented yet, might be possible to use Linux implementation
 #else // defined(WEBRTC_LINUX)
-    #include <sys/sysinfo.h>
     #include "cpu_linux.h"
 #endif
 
-#include "trace.h"
-
 namespace webrtc {
-WebRtc_UWord32 CpuWrapper::_numberOfCores = 0;
-
-WebRtc_UWord32 CpuWrapper::DetectNumberOfCores()
-{
-    if (!_numberOfCores)
-    {
-#if defined(_WIN32)
-        SYSTEM_INFO si;
-        GetSystemInfo(&si);
-        _numberOfCores = static_cast<WebRtc_UWord32>(si.dwNumberOfProcessors);
-        WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
-                     "Available number of cores:%d", _numberOfCores);
-
-#elif defined(WEBRTC_LINUX) && !defined(ANDROID)
-        _numberOfCores = get_nprocs();
-        WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
-                     "Available number of cores:%d", _numberOfCores);
-
-#elif (defined(WEBRTC_MAC) || defined(WEBRTC_MAC_INTEL))
-        int name[] = {CTL_HW, HW_AVAILCPU};
-        int ncpu;
-        size_t size = sizeof(ncpu);
-        if(0 == sysctl(name, 2, &ncpu, &size, NULL, 0))
-        {
-            _numberOfCores = static_cast<WebRtc_UWord32>(ncpu);
-            WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
-                         "Available number of cores:%d", _numberOfCores);
-    } else
-    {
-            WEBRTC_TRACE(kTraceError, kTraceUtility, -1,
-                         "Failed to get number of cores");
-            _numberOfCores = 1;
-    }
-#else
-        WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1,
-                     "No function to get number of cores");
-        _numberOfCores = 1;
-#endif
-    }
-    return _numberOfCores;
-}
-
 CpuWrapper* CpuWrapper::CreateCpu()
 {
 #if defined(_WIN32)
    return new CpuWindows();
 #elif (defined(WEBRTC_MAC) || defined(WEBRTC_MAC_INTEL))
     return new CpuWrapperMac();
-#elif defined(ANDROID)
+#elif defined(WEBRTC_ANDROID)
     return 0;
 #else
     return new CpuLinux();
diff --git a/src/system_wrappers/source/cpu_features.cc b/src/system_wrappers/source/cpu_features.cc
index 850dc9b..41a86e3 100644
--- a/src/system_wrappers/source/cpu_features.cc
+++ b/src/system_wrappers/source/cpu_features.cc
@@ -8,17 +8,29 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+// Parts of this file derived from Chromium's base/cpu.cc.
+
 #include "cpu_features_wrapper.h"
 
+#include "typedefs.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
+#endif
+
 // No CPU feature is available => straight C path.
 int GetCPUInfoNoASM(CPUFeature feature) {
   (void)feature;
   return 0;
 }
 
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#ifndef _MSC_VER
 // Intrinsic for "cpuid".
 #if defined(__pic__) && defined(__i386__)
-static inline void cpuid(int cpu_info[4], int info_type) {
+static inline void __cpuid(int cpu_info[4], int info_type) {
   __asm__ volatile (
     "mov %%ebx, %%edi\n"
     "cpuid\n"
@@ -26,20 +38,22 @@
     : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
     : "a"(info_type));
 }
-#elif defined(__i386__) || defined(__x86_64__)
-static inline void cpuid(int cpu_info[4], int info_type) {
+#else
+static inline void __cpuid(int cpu_info[4], int info_type) {
   __asm__ volatile (
     "cpuid\n"
     : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
     : "a"(info_type));
 }
 #endif
+#endif  // _MSC_VER
+#endif  // WEBRTC_ARCH_X86_FAMILY
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
 // Actual feature detection for x86.
 static int GetCPUInfo(CPUFeature feature) {
   int cpu_info[4];
-  cpuid(cpu_info, 1);
+  __cpuid(cpu_info, 1);
   if (feature == kSSE2) {
     return 0 != (cpu_info[3] & 0x04000000);
   }
diff --git a/src/system_wrappers/source/cpu_features_arm.c b/src/system_wrappers/source/cpu_features_arm.c
new file mode 100644
index 0000000..1065118
--- /dev/null
+++ b/src/system_wrappers/source/cpu_features_arm.c
@@ -0,0 +1,333 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is derived from Android's NDK package r7, located at
+// <ndk>/sources/android/cpufeatures/ (downloadable from
+// http://developer.android.com/sdk/ndk/index.html).
+
+#include "cpu_features_wrapper.h"
+
+#include <fcntl.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+// Define CPU family.
+typedef enum {
+  CPU_FAMILY_UNKNOWN = 0,
+  CPU_FAMILY_ARM,
+  CPU_FAMILY_X86,
+  CPU_FAMILY_MAX  // Do not remove.
+} CpuFamily;
+
+/* One-time guard for cpuInit(). POSIX requires a pthread_once_t object to
+ * be initialized with PTHREAD_ONCE_INIT; relying on zero-initialization is
+ * not portable. */
+static pthread_once_t g_once = PTHREAD_ONCE_INIT;
+static CpuFamily g_cpuFamily;    /* Detected family; set by cpuInit(). */
+static uint64_t g_cpuFeatures;   /* kCPUFeature* bit mask; set by cpuInit(). */
+static int g_cpuCount;           /* Number of cores; set by cpuInit(). */
+
+/* Set to 1 to enable debug output through the D() macro. */
+static const int cpufeatures_debug = 0;
+
+/* Compile-time default CPU family, refined at runtime by cpuInit(). */
+#ifdef __arm__
+#  define DEFAULT_CPU_FAMILY  CPU_FAMILY_ARM
+#elif defined __i386__
+#  define DEFAULT_CPU_FAMILY  CPU_FAMILY_X86
+#else
+#  define DEFAULT_CPU_FAMILY  CPU_FAMILY_UNKNOWN
+#endif
+
+/* Debug printf; a no-op unless cpufeatures_debug is non-zero. */
+#define  D(...) \
+  do { \
+    if (cpufeatures_debug) { \
+      printf(__VA_ARGS__); fflush(stdout); \
+    } \
+  } while (0)
+
+/* Read the content of /proc/cpuinfo into a user-provided buffer.
+ * Return the length of the data, or -1 on error. Does *not*
+ * zero-terminate the content. Will not read more
+ * than 'buffsize' bytes.
+ */
+static int read_file(const char*  pathname, char*  buffer, size_t  buffsize) {
+  int  fd, len;
+
+  fd = open(pathname, O_RDONLY);
+  if (fd < 0)
+    return -1;
+
+  /* Retry the read if it is interrupted by a signal. */
+  do {
+    len = read(fd, buffer, buffsize);
+  } while (len < 0 && errno == EINTR);
+
+  close(fd);
+
+  return len;
+}
+
+/* Extract the content of the first occurrence of a given field in
+ * the content of /proc/cpuinfo and return it as a heap-allocated
+ * string that must be freed by the caller.
+ *
+ * Return NULL if not found.
+ */
+static char* extract_cpuinfo_field(char* buffer, int buflen, const char* field) {
+  int  fieldlen = strlen(field);
+  char* bufend = buffer + buflen;
+  char* result = NULL;
+  int len;
+  const char* p, *q;
+
+  /* Look for the first field occurrence, and ensure it starts the line. */
+  p = buffer;
+  for (;;) {
+    p = memmem(p, bufend - p, field, fieldlen);
+    if (p == NULL)
+      goto EXIT;
+
+    if (p == buffer || p[-1] == '\n')
+      break;
+
+    p += fieldlen;
+  }
+
+  /* Skip to the first colon followed by a space. */
+  p += fieldlen;
+  p  = memchr(p, ':', bufend - p);
+  if (p == NULL || p[1] != ' ')
+    goto EXIT;
+
+  /* Find the end of the line. */
+  p += 2;
+  q = memchr(p, '\n', bufend - p);
+  if (q == NULL)
+    q = bufend;
+
+  /* Copy the line into a heap-allocated buffer. */
+  len = q - p;
+  result = malloc(len + 1);
+  if (result == NULL)
+    goto EXIT;
+
+  memcpy(result, p, len);
+  result[len] = '\0';
+
+EXIT:
+  return result;
+}
+
+/* Count the number of occurrences of a given field prefix in /proc/cpuinfo.
+ * Only matches that start a line and are followed by optional whitespace
+ * and a colon are counted.
+ */
+static int count_cpuinfo_field(char* buffer, int buflen, const char* field) {
+  int fieldlen = strlen(field);
+  const char* p = buffer;
+  const char* bufend = buffer + buflen;
+  int count = 0;
+
+  for (;;) {
+    const char* q;
+
+    p = memmem(p, bufend - p, field, fieldlen);
+    if (p == NULL)
+      break;
+
+    /* Ensure that the field is at the start of a line. */
+    if (p > buffer && p[-1] != '\n') {
+      p += fieldlen;
+      continue;
+    }
+
+    /* Skip any whitespace. */
+    q = p + fieldlen;
+    while (q < bufend && (*q == ' ' || *q == '\t'))
+      q++;
+
+    /* We must have a colon now. */
+    if (q < bufend && *q == ':') {
+      count += 1;
+      q++;
+    }
+    p = q;
+  }
+
+  return count;
+}
+
+/* Like strlen(), but for constant string literals. */
+#define STRLEN_CONST(x)  (sizeof(x) - 1)
+
+
+/* Checks that a space-separated list of items contains one given 'item'.
+ * Returns 1 if found, 0 otherwise. 'list' may be NULL; 'item' must not be.
+ */
+static int has_list_item(const char* list, const char* item) {
+  const char*  p = list;
+  int itemlen = strlen(item);
+
+  if (list == NULL)
+    return 0;
+
+  while (*p) {
+    const char*  q;
+
+    /* skip spaces */
+    while (*p == ' ' || *p == '\t')
+      p++;
+
+    /* find end of current list item */
+    q = p;
+    while (*q && *q != ' ' && *q != '\t')
+      q++;
+
+    /* exact-length match of [p, q) against 'item' */
+    if (itemlen == q - p && !memcmp(p, item, itemlen))
+      return 1;
+
+    /* skip to next item */
+    p = q;
+  }
+  return 0;
+}
+
+
+/* One-shot initializer (run via pthread_once): parses /proc/cpuinfo and
+ * fills in g_cpuFamily, g_cpuFeatures and g_cpuCount.
+ */
+static void cpuInit(void) {
+  char cpuinfo[4096];
+  int  cpuinfo_len;
+
+  g_cpuFamily   = DEFAULT_CPU_FAMILY;
+  g_cpuFeatures = 0;
+  g_cpuCount    = 1;
+
+  cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, sizeof cpuinfo);
+  D("cpuinfo_len is (%d):\n%.*s\n", cpuinfo_len,
+    cpuinfo_len >= 0 ? cpuinfo_len : 0, cpuinfo);
+
+  if (cpuinfo_len < 0) { /* should not happen */
+    return;
+  }
+
+  /* Count the CPU cores; the value may be 0 for single-core CPUs. */
+  g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "processor");
+  if (g_cpuCount == 0) {
+    g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "Processor");
+    if (g_cpuCount == 0) {
+      g_cpuCount = 1;
+    }
+  }
+
+  D("found cpuCount = %d\n", g_cpuCount);
+
+#ifdef __arm__
+  {
+    /* Extract architecture from the "CPU architecture" field.
+     * The list is well-known, unlike the output of
+     * the 'Processor' field which can vary greatly.
+     *
+     * See the definition of the 'proc_arch' array in
+     * $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+     * same file.
+     */
+    char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
+                                          "CPU architecture");
+
+    if (cpuArch != NULL) {
+      char*  end;
+      long   archNumber;
+      int    hasARMv7 = 0;
+
+      D("found cpuArch = '%s'\n", cpuArch);
+
+      /* Read the initial decimal number, ignore the rest. */
+      archNumber = strtol(cpuArch, &end, 10);
+
+      /* Here we assume that ARMv8 will be upwards compatible with v7
+       * in the future. Unfortunately, there is no 'Features' field to
+       * indicate that Thumb-2 is supported.
+       */
+      if (end > cpuArch && archNumber >= 7) {
+        hasARMv7 = 1;
+      }
+
+      /* Unfortunately, it seems that certain ARMv6-based CPUs
+       * report an incorrect architecture number of 7!
+       *
+       * We try to correct this by looking at the 'elf_format'
+       * field reported by the 'Processor' field, which is of the
+       * form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+       * an ARMv6-one.
+       */
+      if (hasARMv7) {
+        char* cpuProc = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
+                                              "Processor");
+        if (cpuProc != NULL) {
+          D("found cpuProc = '%s'\n", cpuProc);
+          if (has_list_item(cpuProc, "(v6l)")) {
+            D("CPU processor and architecture mismatch!!\n");
+            hasARMv7 = 0;
+          }
+          free(cpuProc);
+        }
+      }
+
+      if (hasARMv7) {
+        g_cpuFeatures |= kCPUFeatureARMv7;
+      }
+
+      /* The LDREX / STREX instructions are available from ARMv6. */
+      if (archNumber >= 6) {
+        g_cpuFeatures |= kCPUFeatureLDREXSTREX;
+      }
+
+      free(cpuArch);
+    }
+
+    /* Extract the list of CPU features from the 'Features' field. */
+    char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
+                                              "Features");
+
+    if (cpuFeatures != NULL) {
+
+      D("found cpuFeatures = '%s'\n", cpuFeatures);
+
+      if (has_list_item(cpuFeatures, "vfpv3"))
+        g_cpuFeatures |= kCPUFeatureVFPv3;
+
+      else if (has_list_item(cpuFeatures, "vfpv3d16"))
+        g_cpuFeatures |= kCPUFeatureVFPv3;
+
+      if (has_list_item(cpuFeatures, "neon")) {
+        /* Note: Certain kernels only report neon but not vfpv3
+         * in their features list. However, ARM mandates that if
+         * Neon is implemented, so must be VFPv3, so always set
+         * the flag.
+         */
+        g_cpuFeatures |= kCPUFeatureNEON |
+                         kCPUFeatureVFPv3;
+      }
+      free(cpuFeatures);
+    }
+  }
+#endif  // __arm__
+
+#ifdef __i386__
+  g_cpuFamily = CPU_FAMILY_X86;
+#endif
+}
+
+
+// Public entry point: lazily runs cpuInit() exactly once (thread-safe via
+// pthread_once) and returns the detected ARM feature bit mask.
+uint64_t WebRtc_GetCPUFeaturesARM(void) {
+  pthread_once(&g_once, cpuInit);
+  return g_cpuFeatures;
+}
diff --git a/src/system_wrappers/source/cpu_info.cc b/src/system_wrappers/source/cpu_info.cc
new file mode 100644
index 0000000..e367abf
--- /dev/null
+++ b/src/system_wrappers/source/cpu_info.cc
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "cpu_info.h"
+
+#if defined(_WIN32)
+#include <Windows.h>
+#elif defined(WEBRTC_MAC)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#elif defined(WEBRTC_MAC_INTEL)
+// Intentionally empty
+#elif defined(WEBRTC_ANDROID)
+// Not implemented yet, might be possible to use Linux implementation
+#else // defined(WEBRTC_LINUX)
+#include <sys/sysinfo.h>
+#endif
+
+#include "trace.h"
+
+namespace webrtc {
+
+WebRtc_UWord32 CpuInfo::_numberOfCores = 0;
+
+WebRtc_UWord32 CpuInfo::DetectNumberOfCores()
+{
+    if (!_numberOfCores)
+    {
+#if defined(_WIN32)
+        SYSTEM_INFO si;
+        GetSystemInfo(&si);
+        _numberOfCores = static_cast<WebRtc_UWord32>(si.dwNumberOfProcessors);
+        WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
+                     "Available number of cores:%d", _numberOfCores);
+
+#elif defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+        _numberOfCores = get_nprocs();
+        WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
+                     "Available number of cores:%d", _numberOfCores);
+
+#elif (defined(WEBRTC_MAC) || defined(WEBRTC_MAC_INTEL))
+        int name[] = {CTL_HW, HW_AVAILCPU};
+        int ncpu;
+        size_t size = sizeof(ncpu);
+        if(0 == sysctl(name, 2, &ncpu, &size, NULL, 0))
+        {
+            _numberOfCores = static_cast<WebRtc_UWord32>(ncpu);
+            WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
+                         "Available number of cores:%d", _numberOfCores);
+    } else
+    {
+            WEBRTC_TRACE(kTraceError, kTraceUtility, -1,
+                         "Failed to get number of cores");
+            _numberOfCores = 1;
+    }
+#else
+        WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1,
+                     "No function to get number of cores");
+        _numberOfCores = 1;
+#endif
+    }
+    return _numberOfCores;
+}
+
+} // namespace webrtc
diff --git a/src/system_wrappers/source/cpu_linux.cc b/src/system_wrappers/source/cpu_linux.cc
index eff9704..9d7d89d 100644
--- a/src/system_wrappers/source/cpu_linux.cc
+++ b/src/system_wrappers/source/cpu_linux.cc
@@ -17,22 +17,29 @@
 
 namespace webrtc {
 CpuLinux::CpuLinux()
-{
-    m_oldBusyTime = 0;
-    m_oldIdleTime = 0;
-    m_numCores = 0;
-    m_numCores = GetNumCores();
-    m_oldBusyTimeMulti = new long long[m_numCores];
-    memset(m_oldBusyTimeMulti, 0, sizeof(long long) * m_numCores);
-    m_oldIdleTimeMulti = new long long[m_numCores];
-    memset(m_oldIdleTimeMulti, 0, sizeof(long long) * m_numCores);
-    m_idleArray = new long long[m_numCores];
-    memset(m_idleArray, 0, sizeof(long long) * m_numCores);
-    m_busyArray = new long long[m_numCores];
-    memset(m_busyArray, 0, sizeof(long long) * m_numCores);
-    m_resultArray = new WebRtc_UWord32[m_numCores];
+    : m_oldBusyTime(0),
+      m_oldIdleTime(0),
+      m_oldBusyTimeMulti(NULL),
+      m_oldIdleTimeMulti(NULL),
+      m_idleArray(NULL),
+      m_busyArray(NULL),
+      m_resultArray(NULL),
+      m_numCores(0) {
+    const int result = GetNumCores();
+    if (result != -1) {
+      m_numCores = result;
+      m_oldBusyTimeMulti = new long long[m_numCores];
+      memset(m_oldBusyTimeMulti, 0, sizeof(long long) * m_numCores);
+      m_oldIdleTimeMulti = new long long[m_numCores];
+      memset(m_oldIdleTimeMulti, 0, sizeof(long long) * m_numCores);
+      m_idleArray = new long long[m_numCores];
+      memset(m_idleArray, 0, sizeof(long long) * m_numCores);
+      m_busyArray = new long long[m_numCores];
+      memset(m_busyArray, 0, sizeof(long long) * m_numCores);
+      m_resultArray = new WebRtc_UWord32[m_numCores];
 
-    GetData(m_oldBusyTime, m_oldIdleTime, m_busyArray, m_idleArray);
+      GetData(m_oldBusyTime, m_oldIdleTime, m_busyArray, m_idleArray);
+    }
 }
 
 CpuLinux::~CpuLinux()
@@ -58,7 +65,8 @@
     numCores = m_numCores;
     long long busy = 0;
     long long idle = 0;
-    GetData(busy, idle, m_busyArray, m_idleArray);
+    if (GetData(busy, idle, m_busyArray, m_idleArray) != 0)
+        return -1;
 
     long long deltaBusy = busy - m_oldBusyTime;
     long long deltaIdle = idle - m_oldIdleTime;
@@ -109,18 +117,28 @@
     }
 
     char line[100];
-    char* dummy = fgets(line, 100, fp);
+    if (fgets(line, 100, fp) == NULL) {
+        fclose(fp);
+        return -1;
+    }
     char firstWord[100];
-    sscanf(line, "%s ", firstWord);
-    if(strncmp(firstWord, "cpu", 3)!=0)
-    {
+    if (sscanf(line, "%s ", firstWord) != 1) {
+        fclose(fp);
+        return -1;
+    }
+    if (strncmp(firstWord, "cpu", 3) != 0) {
+        fclose(fp);
         return -1;
     }
     char sUser[100];
     char sNice[100];
     char sSystem[100];
     char sIdle[100];
-    sscanf(line, "%s %s %s %s %s ", firstWord, sUser, sNice, sSystem, sIdle);
+    if (sscanf(line, "%s %s %s %s %s ",
+               firstWord, sUser, sNice, sSystem, sIdle) != 5) {
+        fclose(fp);
+        return -1;
+    }
     long long luser = atoll(sUser);
     long long lnice = atoll(sNice);
     long long lsystem = atoll(sSystem);
@@ -130,9 +148,15 @@
     idle = lidle;
     for (WebRtc_UWord32 i = 0; i < m_numCores; i++)
     {
-        dummy = fgets(line, 100, fp);
-        sscanf(line, "%s %s %s %s %s ", firstWord, sUser, sNice, sSystem,
-               sIdle);
+        if (fgets(line, 100, fp) == NULL) {
+            fclose(fp);
+            return -1;
+        }
+        if (sscanf(line, "%s %s %s %s %s ", firstWord, sUser, sNice, sSystem,
+                   sIdle) != 5) {
+            fclose(fp);
+            return -1;
+        }
         luser = atoll(sUser);
         lnice = atoll(sNice);
         lsystem = atoll(sSystem);
@@ -153,7 +177,10 @@
     }
     // Skip first line
     char line[100];
-    char* dummy = fgets(line, 100, fp);
+    if (!fgets(line, 100, fp))
+    {
+        return -1;
+    }
     int numCores = -1;
     char firstWord[100];
     do
@@ -161,7 +188,9 @@
         numCores++;
         if (fgets(line, 100, fp))
         {
-            sscanf(line, "%s ", firstWord);
+            if (sscanf(line, "%s ", firstWord) != 1) {
+                firstWord[0] = '\0';
+            }
         } else {
             break;
         }
diff --git a/src/system_wrappers/source/cpu_mac.cc b/src/system_wrappers/source/cpu_mac.cc
index c2a11e1..d82bf07 100644
--- a/src/system_wrappers/source/cpu_mac.cc
+++ b/src/system_wrappers/source/cpu_mac.cc
@@ -17,7 +17,12 @@
 #include "tick_util.h"
 
 namespace webrtc {
-CpuWrapperMac::CpuWrapperMac() : _cpuUsage(NULL)
+CpuWrapperMac::CpuWrapperMac()
+    : _cpuCount(0),
+      _cpuUsage(NULL), 
+      _totalCpuUsage(0),
+      _lastTickCount(NULL),
+      _lastTime(0)
 {
     natural_t cpuCount;
     processor_info_array_t infoArray;
@@ -33,6 +38,7 @@
         return;
     }
 
+    _cpuCount = cpuCount;
     _cpuUsage = new WebRtc_UWord32[cpuCount];
     _lastTickCount = new WebRtc_Word64[cpuCount];
     _lastTime = TickTime::MillisecondTimestamp();
@@ -47,14 +53,15 @@
             ticks += cpuLoadInfo[cpu].cpu_ticks[state];
         }
         _lastTickCount[cpu] = ticks;
+        _cpuUsage[cpu] = 0;
     }
     vm_deallocate(mach_task_self(), (vm_address_t)infoArray, infoCount);
 }
 
 CpuWrapperMac::~CpuWrapperMac()
 {
-    delete _cpuUsage;
-    delete _lastTickCount;
+    delete[] _cpuUsage;
+    delete[] _lastTickCount;
 }
 
 WebRtc_Word32 CpuWrapperMac::CpuUsage()
@@ -68,29 +75,35 @@
 CpuWrapperMac::CpuUsageMultiCore(WebRtc_UWord32& numCores,
                                  WebRtc_UWord32*& array)
 {
-    natural_t cpuCount;
-    processor_info_array_t infoArray;
-    mach_msg_type_number_t infoCount;
-
     // sanity check
     if(_cpuUsage == NULL)
     {
         return -1;
     }
+    
     WebRtc_Word64 now = TickTime::MillisecondTimestamp();
     WebRtc_Word64 timeDiffMS = now - _lastTime;
-    // TODO(hellner) why block here? Why not just return the old
-    //                          value? Is this behavior consistent across all
-    //                          platforms?
-    // Make sure that at least 500 ms pass between calls.
-    if(timeDiffMS < 500)
+    if(timeDiffMS >= 500) 
     {
-        usleep((500-timeDiffMS)*1000);
-        return CpuUsageMultiCore(numCores, array);
+        if(Update(timeDiffMS) != 0) 
+        {
+           return -1;
+        }
+        _lastTime = now;
     }
-    _lastTime = now;
+    
+    numCores = _cpuCount;
+    array = _cpuUsage;
+    return _totalCpuUsage / _cpuCount;
+}
 
-     kern_return_t error = host_processor_info(mach_host_self(),
+WebRtc_Word32 CpuWrapperMac::Update(WebRtc_Word64 timeDiffMS)
+{    
+    natural_t cpuCount;
+    processor_info_array_t infoArray;
+    mach_msg_type_number_t infoCount;
+    
+    kern_return_t error = host_processor_info(mach_host_self(),
                                               PROCESSOR_CPU_LOAD_INFO,
                                               &cpuCount,
                                               &infoArray,
@@ -103,7 +116,7 @@
     processor_cpu_load_info_data_t* cpuLoadInfo =
         (processor_cpu_load_info_data_t*) infoArray;
 
-    WebRtc_Word32 totalCpuUsage = 0;
+    _totalCpuUsage = 0;
     for (unsigned int cpu = 0; cpu < cpuCount; cpu++)
     {
         WebRtc_Word64 ticks = 0;
@@ -120,13 +133,11 @@
                                               timeDiffMS);
         }
         _lastTickCount[cpu] = ticks;
-        totalCpuUsage += _cpuUsage[cpu];
+        _totalCpuUsage += _cpuUsage[cpu];
     }
 
     vm_deallocate(mach_task_self(), (vm_address_t)infoArray, infoCount);
 
-    numCores = cpuCount;
-    array = _cpuUsage;
-    return totalCpuUsage/cpuCount;
+    return 0;
 }
 } // namespace webrtc
diff --git a/src/system_wrappers/source/cpu_mac.h b/src/system_wrappers/source/cpu_mac.h
index 04cd097..f9f8207 100644
--- a/src/system_wrappers/source/cpu_mac.h
+++ b/src/system_wrappers/source/cpu_mac.h
@@ -35,7 +35,11 @@
     virtual void Stop() {}
 
 private:
+    WebRtc_Word32 Update(WebRtc_Word64 timeDiffMS);
+    
+    WebRtc_UWord32  _cpuCount;
     WebRtc_UWord32* _cpuUsage;
+    WebRtc_Word32   _totalCpuUsage;
     WebRtc_Word64*  _lastTickCount;
     WebRtc_Word64   _lastTime;
 };
diff --git a/src/system_wrappers/source/cpu_no_op.cc b/src/system_wrappers/source/cpu_no_op.cc
new file mode 100644
index 0000000..e42ef91
--- /dev/null
+++ b/src/system_wrappers/source/cpu_no_op.cc
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include "cpu_wrapper.h"
+
+namespace webrtc {
+
+CpuWrapper* CpuWrapper::CreateCpu()
+{
+    return NULL;
+}
+
+} // namespace webrtc
diff --git a/src/system_wrappers/source/cpu_wrapper_unittest.cc b/src/system_wrappers/source/cpu_wrapper_unittest.cc
new file mode 100644
index 0000000..dd49c3a
--- /dev/null
+++ b/src/system_wrappers/source/cpu_wrapper_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/interface/cpu_wrapper.h"
+
+#include "gtest/gtest.h"
+#include "system_wrappers/interface/cpu_info.h"
+#include "system_wrappers/interface/event_wrapper.h"
+#include "system_wrappers/interface/scoped_ptr.h"
+#include "system_wrappers/interface/trace.h"
+#include "testsupport/fileutils.h"
+
+using webrtc::CpuInfo;
+using webrtc::CpuWrapper;
+using webrtc::EventWrapper;
+using webrtc::scoped_ptr;
+using webrtc::Trace;
+
+TEST(CpuWrapperTest, Usage) {
+  Trace::CreateTrace();
+  std::string trace_file = webrtc::test::OutputPath() +
+      "cpu_wrapper_unittest.txt";
+  Trace::SetTraceFile(trace_file.c_str());
+  Trace::SetLevelFilter(webrtc::kTraceAll);
+  printf("Number of cores detected:%u\n", CpuInfo::DetectNumberOfCores());
+  scoped_ptr<CpuWrapper> cpu(CpuWrapper::CreateCpu());
+  ASSERT_TRUE(cpu.get() != NULL);
+  scoped_ptr<EventWrapper> sleep_event(EventWrapper::Create());
+  ASSERT_TRUE(sleep_event.get() != NULL);
+
+  int num_iterations = 0;
+  WebRtc_UWord32 num_cores = 0;
+  WebRtc_UWord32* cores = NULL;
+  bool cpu_usage_available = cpu->CpuUsageMultiCore(num_cores, cores) != -1;
+  // Initializing the CPU measurements may take a couple of seconds on Windows.
+  // Since the initialization is lazy we need to wait until it is completed.
+  // Should not take more than 10000 ms.
+  while (cpu_usage_available && (++num_iterations < 10000)) {
+    if (cores != NULL) {
+      ASSERT_GT(num_cores, 0u);
+      break;
+    }
+    sleep_event->Wait(1);
+    cpu_usage_available = cpu->CpuUsageMultiCore(num_cores, cores) != -1;
+  }
+  ASSERT_TRUE(cpu_usage_available);
+
+  const WebRtc_Word32 average = cpu->CpuUsageMultiCore(num_cores, cores);
+  ASSERT_TRUE(cores != NULL);
+  EXPECT_GT(num_cores, 0u);
+  EXPECT_GE(average, 0);
+  EXPECT_LE(average, 100);
+
+  printf("\nNumber of cores:%d\n", num_cores);
+  printf("Average cpu:%d\n", average);
+  for (WebRtc_UWord32 i = 0; i < num_cores; i++) {
+    printf("Core:%u CPU:%u \n", i, cores[i]);
+    EXPECT_GE(cores[i], 0u);
+    EXPECT_LE(cores[i], 100u);
+  }
+
+  Trace::ReturnTrace();
+};
diff --git a/src/system_wrappers/source/critical_section.cc b/src/system_wrappers/source/critical_section.cc
index 213c352..d3f3f01 100644
--- a/src/system_wrappers/source/critical_section.cc
+++ b/src/system_wrappers/source/critical_section.cc
@@ -10,9 +10,9 @@
 
 #if defined(_WIN32)
     #include <windows.h>
-    #include "critical_section_windows.h"
+    #include "critical_section_win.h"
 #else
-    #include "critical_section_linux.h"
+    #include "critical_section_posix.h"
 #endif
 
 namespace webrtc {
@@ -21,7 +21,7 @@
 #ifdef _WIN32
     return new CriticalSectionWindows();
 #else
-    return new CriticalSectionLinux();
+    return new CriticalSectionPosix();
 #endif
 }
 } // namespace webrtc
diff --git a/src/system_wrappers/source/critical_section_linux.cc b/src/system_wrappers/source/critical_section_posix.cc
similarity index 79%
rename from src/system_wrappers/source/critical_section_linux.cc
rename to src/system_wrappers/source/critical_section_posix.cc
index 35e81ae..b499b9f 100644
--- a/src/system_wrappers/source/critical_section_linux.cc
+++ b/src/system_wrappers/source/critical_section_posix.cc
@@ -8,10 +8,10 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "critical_section_linux.h"
+#include "critical_section_posix.h"
 
 namespace webrtc {
-CriticalSectionLinux::CriticalSectionLinux()
+CriticalSectionPosix::CriticalSectionPosix()
 {
     pthread_mutexattr_t attr;
     pthread_mutexattr_init(&attr);
@@ -19,19 +19,19 @@
     pthread_mutex_init(&_mutex, &attr);
 }
 
-CriticalSectionLinux::~CriticalSectionLinux()
+CriticalSectionPosix::~CriticalSectionPosix()
 {
     pthread_mutex_destroy(&_mutex);
 }
 
 void
-CriticalSectionLinux::Enter()
+CriticalSectionPosix::Enter()
 {
     pthread_mutex_lock(&_mutex);
 }
 
 void
-CriticalSectionLinux::Leave()
+CriticalSectionPosix::Leave()
 {
     pthread_mutex_unlock(&_mutex);
 }
diff --git a/src/system_wrappers/source/critical_section_linux.h b/src/system_wrappers/source/critical_section_posix.h
similarity index 63%
rename from src/system_wrappers/source/critical_section_linux.h
rename to src/system_wrappers/source/critical_section_posix.h
index 5ada1cb..40b7dc9 100644
--- a/src/system_wrappers/source/critical_section_linux.h
+++ b/src/system_wrappers/source/critical_section_posix.h
@@ -8,28 +8,28 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_POSIX_H_
 
 #include "critical_section_wrapper.h"
 
 #include <pthread.h>
 
 namespace webrtc {
-class CriticalSectionLinux : public CriticalSectionWrapper
+class CriticalSectionPosix : public CriticalSectionWrapper
 {
 public:
-    CriticalSectionLinux();
+    CriticalSectionPosix();
 
-    virtual ~CriticalSectionLinux();
+    virtual ~CriticalSectionPosix();
 
     virtual void Enter();
     virtual void Leave();
 
 private:
     pthread_mutex_t _mutex;
-    friend class ConditionVariableLinux;
+    friend class ConditionVariablePosix;
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_CRITICAL_SECTION_POSIX_H_
diff --git a/src/system_wrappers/source/data_log.cc b/src/system_wrappers/source/data_log.cc
new file mode 100644
index 0000000..f123896
--- /dev/null
+++ b/src/system_wrappers/source/data_log.cc
@@ -0,0 +1,455 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "data_log.h"
+
+#include <assert.h>
+
+#include <algorithm>
+#include <list>
+
+#include "critical_section_wrapper.h"
+#include "event_wrapper.h"
+#include "file_wrapper.h"
+#include "rw_lock_wrapper.h"
+#include "thread_wrapper.h"
+
+namespace webrtc {
+
+DataLogImpl::CritSectScopedPtr DataLogImpl::crit_sect_(
+  CriticalSectionWrapper::CreateCriticalSection());
+
+DataLogImpl* DataLogImpl::instance_ = NULL;
+
+// A Row contains cells, which are indexed by the column names as std::string.
+// The string index is treated in a case sensitive way.
+class Row {
+ public:
+  Row();
+  ~Row();
+
+  // Inserts a Container into the cell of the column specified with
+  // column_name.
+  // column_name is treated in a case sensitive way.
+  int InsertCell(const std::string& column_name,
+                 const Container* value_container);
+
+  // Converts the value at the column specified by column_name to a string
+  // stored in value_string.
+  // column_name is treated in a case sensitive way.
+  void ToString(const std::string& column_name, std::string* value_string);
+
+ private:
+  // Collection of containers indexed by column name as std::string
+  typedef std::map<std::string, const Container*> CellMap;
+
+  CellMap                   cells_;
+  CriticalSectionWrapper*   cells_lock_;
+};
+
+// A LogTable contains multiple rows, where only the latest row is active for
+// editing. The rows are defined by the ColumnMap, which contains the name of
+// each column and the length of the column (1 for one-value-columns and greater
+// than 1 for multi-value-columns).
+class LogTable {
+ public:
+  LogTable();
+  ~LogTable();
+
+  // Adds the column with name column_name to the table. The column will be a
+  // multi-value-column if multi_value_length is greater than 1.
+  // column_name is treated in a case sensitive way.
+  int AddColumn(const std::string& column_name, int multi_value_length);
+
+  // Buffers the current row while it is waiting to be written to file,
+  // which is done by a call to Flush(). A new row is available when the
+  // function returns
+  void NextRow();
+
+  // Inserts a Container into the cell of the column specified with
+  // column_name.
+  // column_name is treated in a case sensitive way.
+  int InsertCell(const std::string& column_name,
+                 const Container* value_container);
+
+  // Creates a log file, named as specified in the string file_name, to
+  // where the table will be written when calling Flush().
+  int CreateLogFile(const std::string& file_name);
+
+  // Write all complete rows to file.
+  // May not be called by two threads simultaneously (doing so may result in
+  // a race condition). Will be called by the file_writer_thread_ when that
+  // thread is running.
+  void Flush();
+
+ private:
+  // Collection of multi_value_lengths indexed by column name as std::string
+  typedef std::map<std::string, int> ColumnMap;
+  typedef std::list<Row*> RowList;
+
+  ColumnMap               columns_;
+  RowList                 rows_[2];
+  RowList*                rows_history_;
+  RowList*                rows_flush_;
+  Row*                    current_row_;
+  FileWrapper*            file_;
+  bool                    write_header_;
+  CriticalSectionWrapper* table_lock_;
+};
+
+Row::Row()
+  : cells_(),
+    cells_lock_(CriticalSectionWrapper::CreateCriticalSection()) {
+}
+
+Row::~Row() {
+  for (CellMap::iterator it = cells_.begin(); it != cells_.end();) {
+    delete it->second;
+    // For maps all iterators (except the erased) are valid after an erase
+    cells_.erase(it++);
+  }
+  delete cells_lock_;
+}
+
+int Row::InsertCell(const std::string& column_name,
+                    const Container* value_container) {
+  CriticalSectionScoped synchronize(cells_lock_);
+  assert(cells_.count(column_name) == 0);
+  if (cells_.count(column_name) > 0)
+    return -1;
+  cells_[column_name] = value_container;
+  return 0;
+}
+
+void Row::ToString(const std::string& column_name,
+                   std::string* value_string) {
+  CriticalSectionScoped synchronize(cells_lock_);
+  const Container* container = cells_[column_name];
+  if (container == NULL) {
+    *value_string = "NaN,";
+    return;
+  }
+  container->ToString(value_string);
+}
+
+LogTable::LogTable()
+  : columns_(),
+    rows_(),
+    rows_history_(&rows_[0]),
+    rows_flush_(&rows_[1]),
+    current_row_(new Row),
+    file_(FileWrapper::Create()),
+    write_header_(true),
+    table_lock_(CriticalSectionWrapper::CreateCriticalSection()) {
+}
+
+LogTable::~LogTable() {
+  for (RowList::iterator row_it = rows_history_->begin();
+       row_it != rows_history_->end();) {
+    delete *row_it;
+    row_it = rows_history_->erase(row_it);
+  }
+  for (ColumnMap::iterator col_it = columns_.begin();
+       col_it != columns_.end();) {
+    // For maps all iterators (except the erased) are valid after an erase
+    columns_.erase(col_it++);
+  }
+  if (file_ != NULL) {
+    file_->Flush();
+    file_->CloseFile();
+    delete file_;
+  }
+  delete current_row_;
+  delete table_lock_;
+}
+
+int LogTable::AddColumn(const std::string& column_name,
+                        int multi_value_length) {
+  assert(multi_value_length > 0);
+  if (!write_header_) {
+    // It's not allowed to add new columns after the header
+    // has been written.
+    assert(false);
+    return -1;
+  } else {
+    CriticalSectionScoped synchronize(table_lock_);
+    if (write_header_)
+      columns_[column_name] = multi_value_length;
+    else
+      return -1;
+  }
+  return 0;
+}
+
+void LogTable::NextRow() {
+  CriticalSectionScoped sync_rows(table_lock_);
+  rows_history_->push_back(current_row_);
+  current_row_ = new Row;
+}
+
+int LogTable::InsertCell(const std::string& column_name,
+                         const Container* value_container) {
+  CriticalSectionScoped synchronize(table_lock_);
+  assert(columns_.count(column_name) > 0);
+  if (columns_.count(column_name) == 0)
+    return -1;
+  return current_row_->InsertCell(column_name, value_container);
+}
+
+int LogTable::CreateLogFile(const std::string& file_name) {
+  if (file_name.length() == 0)
+    return -1;
+  if (file_->Open())
+    return -1;
+  file_->OpenFile(file_name.c_str(),
+                  false,  // Open with read/write permissions
+                  false,  // Don't wraparound and write at the beginning when
+                          // the file is full
+                  true);  // Open as a text file
+  if (file_ == NULL)
+    return -1;
+  return 0;
+}
+
+void LogTable::Flush() {
+  ColumnMap::iterator column_it;
+  bool commit_header = false;
+  if (write_header_) {
+    CriticalSectionScoped synchronize(table_lock_);
+    if (write_header_) {
+      commit_header = true;
+      write_header_ = false;
+    }
+  }
+  if (commit_header) {
+    for (column_it = columns_.begin();
+         column_it != columns_.end(); ++column_it) {
+      if (column_it->second > 1) {
+        file_->WriteText("%s[%u],", column_it->first.c_str(),
+                         column_it->second);
+        for (int i = 1; i < column_it->second; ++i)
+          file_->WriteText(",");
+      } else {
+        file_->WriteText("%s,", column_it->first.c_str());
+      }
+    }
+    if (columns_.size() > 0)
+      file_->WriteText("\n");
+  }
+
+  // Swap the list used for flushing with the list containing the row history
+  // and clear the history. We also create a local pointer to the new
+  // list used for flushing to avoid race conditions if another thread
+  // calls this function while we are writing.
+  // We don't want to block the list while we're writing to file.
+  {
+    CriticalSectionScoped synchronize(table_lock_);
+    RowList* tmp = rows_flush_;
+    rows_flush_ = rows_history_;
+    rows_history_ = tmp;
+    rows_history_->clear();
+  }
+
+  // Write all complete rows to file and delete them
+  for (RowList::iterator row_it = rows_flush_->begin();
+       row_it != rows_flush_->end();) {
+    for (column_it = columns_.begin();
+         column_it != columns_.end(); ++column_it) {
+      std::string row_string;
+      (*row_it)->ToString(column_it->first, &row_string);
+      file_->WriteText("%s", row_string.c_str());
+    }
+    if (columns_.size() > 0)
+      file_->WriteText("\n");
+    delete *row_it;
+    row_it = rows_flush_->erase(row_it);
+  }
+}
+
+int DataLog::CreateLog() {
+  return DataLogImpl::CreateLog();
+}
+
+void DataLog::ReturnLog() {
+  return DataLogImpl::ReturnLog();
+}
+
+std::string DataLog::Combine(const std::string& table_name, int table_id) {
+  std::stringstream ss;
+  std::string combined_id = table_name;
+  std::string number_suffix;
+  ss << "_" << table_id;
+  ss >> number_suffix;
+  combined_id += number_suffix;
+  std::transform(combined_id.begin(), combined_id.end(), combined_id.begin(),
+                 ::tolower);
+  return combined_id;
+}
+
+int DataLog::AddTable(const std::string& table_name) {
+  DataLogImpl* data_log = DataLogImpl::StaticInstance();
+  if (data_log == NULL)
+    return -1;
+  return data_log->AddTable(table_name);
+}
+
+int DataLog::AddColumn(const std::string& table_name,
+                       const std::string& column_name,
+                       int multi_value_length) {
+  DataLogImpl* data_log = DataLogImpl::StaticInstance();
+  if (data_log == NULL)
+    return -1;
+  return data_log->DataLogImpl::StaticInstance()->AddColumn(table_name,
+                                                            column_name,
+                                                            multi_value_length);
+}
+
+int DataLog::NextRow(const std::string& table_name) {
+  DataLogImpl* data_log = DataLogImpl::StaticInstance();
+  if (data_log == NULL)
+    return -1;
+  return data_log->DataLogImpl::StaticInstance()->NextRow(table_name);
+}
+
+DataLogImpl::DataLogImpl()
+  : counter_(1),
+    tables_(),
+    flush_event_(EventWrapper::Create()),
+    file_writer_thread_(NULL),
+    tables_lock_(RWLockWrapper::CreateRWLock()) {
+}
+
+DataLogImpl::~DataLogImpl() {
+  StopThread();
+  Flush();  // Write any remaining rows
+  delete file_writer_thread_;
+  delete flush_event_;
+  for (TableMap::iterator it = tables_.begin(); it != tables_.end();) {
+    delete static_cast<LogTable*>(it->second);
+    // For maps all iterators (except the erased) are valid after an erase
+    tables_.erase(it++);
+  }
+  delete tables_lock_;
+}
+
+int DataLogImpl::CreateLog() {
+  CriticalSectionScoped synchronize(crit_sect_.get());
+  if (instance_ == NULL) {
+    instance_ = new DataLogImpl();
+    return instance_->Init();
+  } else {
+    ++instance_->counter_;
+  }
+  return 0;
+}
+
+int DataLogImpl::Init() {
+  file_writer_thread_ = ThreadWrapper::CreateThread(
+                          DataLogImpl::Run,
+                          instance_,
+                          kHighestPriority,
+                          "DataLog");
+  if (file_writer_thread_ == NULL)
+    return -1;
+  unsigned int thread_id = 0;
+  bool success = file_writer_thread_->Start(thread_id);
+  if (!success)
+    return -1;
+  return 0;
+}
+
+DataLogImpl* DataLogImpl::StaticInstance() {
+  return instance_;
+}
+
+void DataLogImpl::ReturnLog() {
+  CriticalSectionScoped synchronize(crit_sect_.get());
+  if (instance_ && instance_->counter_ > 1) {
+    --instance_->counter_;
+    return;
+  }
+  delete instance_;
+  instance_ = NULL;
+}
+
+int DataLogImpl::AddTable(const std::string& table_name) {
+  WriteLockScoped synchronize(*tables_lock_);
+  // Make sure we don't add a table which already exists
+  if (tables_.count(table_name) > 0)
+    return -1;
+  tables_[table_name] = new LogTable();
+  if (tables_[table_name]->CreateLogFile(table_name + ".txt") == -1)
+    return -1;
+  return 0;
+}
+
+int DataLogImpl::AddColumn(const std::string& table_name,
+                           const std::string& column_name,
+                           int multi_value_length) {
+  ReadLockScoped synchronize(*tables_lock_);
+  if (tables_.count(table_name) == 0)
+    return -1;
+  return tables_[table_name]->AddColumn(column_name, multi_value_length);
+}
+
+int DataLogImpl::InsertCell(const std::string& table_name,
+                            const std::string& column_name,
+                            const Container* value_container) {
+  ReadLockScoped synchronize(*tables_lock_);
+  assert(tables_.count(table_name) > 0);
+  if (tables_.count(table_name) == 0)
+    return -1;
+  return tables_[table_name]->InsertCell(column_name, value_container);
+}
+
+int DataLogImpl::NextRow(const std::string& table_name) {
+  ReadLockScoped synchronize(*tables_lock_);
+  if (tables_.count(table_name) == 0)
+    return -1;
+  tables_[table_name]->NextRow();
+  if (file_writer_thread_ == NULL) {
+    // Write every row to file as they get complete.
+    tables_[table_name]->Flush();
+  } else {
+    // Signal a complete row
+    flush_event_->Set();
+  }
+  return 0;
+}
+
+void DataLogImpl::Flush() {
+  ReadLockScoped synchronize(*tables_lock_);
+  for (TableMap::iterator it = tables_.begin(); it != tables_.end(); ++it) {
+    it->second->Flush();
+  }
+}
+
+bool DataLogImpl::Run(void* obj) {
+  static_cast<DataLogImpl*>(obj)->Process();
+  return true;
+}
+
+void DataLogImpl::Process() {
+  // Wait for a row to be complete
+  flush_event_->Wait(WEBRTC_EVENT_INFINITE);
+  Flush();
+}
+
+void DataLogImpl::StopThread() {
+  if (file_writer_thread_ != NULL) {
+    file_writer_thread_->SetNotAlive();
+    flush_event_->Set();
+    // Call Stop() repeatedly, waiting for the Flush() call in Process() to
+    // finish.
+    while (!file_writer_thread_->Stop()) continue;
+  }
+}
+
+}  // namespace webrtc
diff --git a/src/system_wrappers/source/data_log_c.cc b/src/system_wrappers/source/data_log_c.cc
new file mode 100644
index 0000000..f8d7efd
--- /dev/null
+++ b/src/system_wrappers/source/data_log_c.cc
@@ -0,0 +1,145 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This is the pure C wrapper of the DataLog class.
+ */
+
+#include "system_wrappers/interface/data_log_c.h"
+
+#include <string>
+
+#include "system_wrappers/interface/data_log.h"
+
+extern "C" int WebRtcDataLog_CreateLog() {
+  return webrtc::DataLog::CreateLog();
+}
+
+extern "C" void WebRtcDataLog_ReturnLog() {
+  return webrtc::DataLog::ReturnLog();
+}
+
+extern "C" char* WebRtcDataLog_Combine(char* combined_name, size_t combined_len,
+                                       const char* table_name, int table_id) {
+  if (!table_name) return NULL;
+  std::string combined = webrtc::DataLog::Combine(table_name, table_id);
+  if (combined.size() >= combined_len) return NULL;
+  std::copy(combined.begin(), combined.end(), combined_name);
+  combined_name[combined.size()] = '\0';
+  return combined_name;
+}
+
+extern "C" int WebRtcDataLog_AddTable(const char* table_name) {
+  if (!table_name) return -1;
+  return webrtc::DataLog::AddTable(table_name);
+}
+
+extern "C" int WebRtcDataLog_AddColumn(const char* table_name,
+                                       const char* column_name,
+                                       int multi_value_length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::AddColumn(table_name, column_name,
+                                    multi_value_length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_int(const char* table_name,
+                                            const char* column_name,
+                                            int value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_int(const char* table_name,
+                                             const char* column_name,
+                                             const int* values,
+                                             int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_float(const char* table_name,
+                                              const char* column_name,
+                                              float value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_float(const char* table_name,
+                                               const char* column_name,
+                                               const float* values,
+                                               int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_double(const char* table_name,
+                                               const char* column_name,
+                                               double value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_double(const char* table_name,
+                                                const char* column_name,
+                                                const double* values,
+                                                int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_int32(const char* table_name,
+                                              const char* column_name,
+                                              int32_t value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_int32(const char* table_name,
+                                               const char* column_name,
+                                               const int32_t* values,
+                                               int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_uint32(const char* table_name,
+                                               const char* column_name,
+                                               uint32_t value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_uint32(const char* table_name,
+                                                const char* column_name,
+                                                const uint32_t* values,
+                                                int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_InsertCell_int64(const char* table_name,
+                                              const char* column_name,
+                                              int64_t value) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, value);
+}
+
+extern "C" int WebRtcDataLog_InsertArray_int64(const char* table_name,
+                                               const char* column_name,
+                                               const int64_t* values,
+                                               int length) {
+  if (!table_name || !column_name) return -1;
+  return webrtc::DataLog::InsertCell(table_name, column_name, values, length);
+}
+
+extern "C" int WebRtcDataLog_NextRow(const char* table_name) {
+  if (!table_name) return -1;
+  return webrtc::DataLog::NextRow(table_name);
+}
diff --git a/src/system_wrappers/source/data_log_c_helpers_unittest.c b/src/system_wrappers/source/data_log_c_helpers_unittest.c
new file mode 100644
index 0000000..e78a0e3
--- /dev/null
+++ b/src/system_wrappers/source/data_log_c_helpers_unittest.c
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/data_log_c_helpers_unittest.h"
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "system_wrappers/interface/data_log_c.h"
+
+enum { kTestArrayLen = 4 };
+static const char kTableName[] = "c_wrapper_table";
+static const char kColumnName1[] = "Scalar";
+static const char kColumnName2[] = "Vector";
+
+int WebRtcDataLogCHelper_TestCreateLog() {
+  return WebRtcDataLog_CreateLog();
+}
+
+int WebRtcDataLogCHelper_TestReturnLog() {
+  WebRtcDataLog_ReturnLog();
+  return 0;
+}
+
+int WebRtcDataLogCHelper_TestCombine() {
+  const int kOutLen = strlen(kTableName) + 4;  /* Room for "_17" + '\0' */
+  char* combined_name = malloc(kOutLen * sizeof(char));
+  char* out_ptr = WebRtcDataLog_Combine(combined_name, kOutLen, kTableName, 17);
+  int return_code = 0;
+  if (!out_ptr) {
+    return_code = -1;
+  } else if (strcmp(combined_name, "c_wrapper_table_17") != 0) {
+    return_code = -2;
+  }
+  /* else-if avoids strcmp on a buffer Combine never wrote to. */
+  free(combined_name);
+  return return_code;
+}
+
+int WebRtcDataLogCHelper_TestAddTable() {
+  return WebRtcDataLog_AddTable(kTableName);
+}
+
+int WebRtcDataLogCHelper_TestAddColumn() {
+  if (WebRtcDataLog_AddColumn(kTableName, kColumnName1, 1) != 0) {
+    return -1;
+  }
+  if (WebRtcDataLog_AddColumn(kTableName, kColumnName2, kTestArrayLen) != 0) {
+    return -2;
+  }
+  return 0;
+}
+
+int WebRtcDataLogCHelper_TestNextRow() {
+  return WebRtcDataLog_NextRow(kTableName);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_int() {
+  return WebRtcDataLog_InsertCell_int(kTableName, kColumnName1, 17);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_int() {
+  int values[kTestArrayLen] = {1, 2, 3, 4};
+  return WebRtcDataLog_InsertArray_int(kTableName, kColumnName2, values,
+                                       kTestArrayLen);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_float() {
+  return WebRtcDataLog_InsertCell_float(kTableName, kColumnName1, 17.0f);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_float() {
+  float values[kTestArrayLen] = {1.0f, 2.0f, 3.0f, 4.0f};
+  return WebRtcDataLog_InsertArray_float(kTableName, kColumnName2, values,
+                                         kTestArrayLen);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_double() {
+  return WebRtcDataLog_InsertCell_double(kTableName, kColumnName1, 17.0);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_double() {
+  double values[kTestArrayLen] = {1.0, 2.0, 3.0, 4.0};
+  return WebRtcDataLog_InsertArray_double(kTableName, kColumnName2, values,
+                                          kTestArrayLen);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_int32() {
+  return WebRtcDataLog_InsertCell_int32(kTableName, kColumnName1, 17);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_int32() {
+  int32_t values[kTestArrayLen] = {1, 2, 3, 4};
+  return WebRtcDataLog_InsertArray_int32(kTableName, kColumnName2, values,
+                                         kTestArrayLen);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_uint32() {
+  return WebRtcDataLog_InsertCell_uint32(kTableName, kColumnName1, 17);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_uint32() {
+  uint32_t values[kTestArrayLen] = {1, 2, 3, 4};
+  return WebRtcDataLog_InsertArray_uint32(kTableName, kColumnName2, values,
+                                          kTestArrayLen);
+}
+
+int WebRtcDataLogCHelper_TestInsertCell_int64() {
+  return WebRtcDataLog_InsertCell_int64(kTableName, kColumnName1, 17);
+}
+
+int WebRtcDataLogCHelper_TestInsertArray_int64() {
+  int64_t values[kTestArrayLen] = {1, 2, 3, 4};
+  return WebRtcDataLog_InsertArray_int64(kTableName, kColumnName2, values,
+                                         kTestArrayLen);
+}
diff --git a/src/system_wrappers/source/data_log_c_helpers_unittest.h b/src/system_wrappers/source/data_log_c_helpers_unittest.h
new file mode 100644
index 0000000..ef86eae
--- /dev/null
+++ b/src/system_wrappers/source/data_log_c_helpers_unittest.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_SYSTEM_WRAPPERS_SOURCE_DATA_LOG_C_HELPERS_UNITTEST_H_
+#define SRC_SYSTEM_WRAPPERS_SOURCE_DATA_LOG_C_HELPERS_UNITTEST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int WebRtcDataLogCHelper_TestCreateLog(void);
+
+int WebRtcDataLogCHelper_TestReturnLog(void);
+
+int WebRtcDataLogCHelper_TestCombine(void);
+
+int WebRtcDataLogCHelper_TestAddTable(void);
+
+int WebRtcDataLogCHelper_TestAddColumn(void);
+
+int WebRtcDataLogCHelper_TestNextRow(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_int(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_int(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_float(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_float(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_double(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_double(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_int32(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_int32(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_uint32(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_uint32(void);
+
+int WebRtcDataLogCHelper_TestInsertCell_int64(void);
+
+int WebRtcDataLogCHelper_TestInsertArray_int64(void);
+
+#ifdef __cplusplus
+}  // end of extern "C"
+#endif
+
+#endif  // SRC_SYSTEM_WRAPPERS_SOURCE_DATA_LOG_C_HELPERS_UNITTEST_H_
diff --git a/src/system_wrappers/source/data_log_helpers_unittest.cc b/src/system_wrappers/source/data_log_helpers_unittest.cc
new file mode 100644
index 0000000..94b4d6e
--- /dev/null
+++ b/src/system_wrappers/source/data_log_helpers_unittest.cc
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "data_log.h"
+#include "gtest/gtest.h"
+
+using ::webrtc::DataLog;
+
+TEST(TestDataLog, IntContainers) {
+  int c = 5;
+  webrtc::ValueContainer<int> v1(c);
+  c = 10;
+  webrtc::ValueContainer<int> v2(c);
+  std::string s1, s2;
+  v1.ToString(&s1);
+  v2.ToString(&s2);
+  ASSERT_EQ(s1, "5,");
+  ASSERT_EQ(s2, "10,");
+  v1 = v2;
+  v1.ToString(&s1);
+  ASSERT_EQ(s1, s2);
+}
+
+TEST(TestDataLog, DoubleContainers) {
+  double c = 3.5;
+  webrtc::ValueContainer<double> v1(c);
+  c = 10.3;
+  webrtc::ValueContainer<double> v2(c);
+  std::string s1, s2;
+  v1.ToString(&s1);
+  v2.ToString(&s2);
+  ASSERT_EQ(s1, "3.5,");
+  ASSERT_EQ(s2, "10.3,");
+  v1 = v2;
+  v1.ToString(&s1);
+  ASSERT_EQ(s1, s2);
+}
+
+TEST(TestDataLog, MultiValueContainers) {
+  int a[3] = {1, 2, 3};
+  int b[3] = {4, 5, 6};
+  webrtc::MultiValueContainer<int> m1(a, 3);
+  webrtc::MultiValueContainer<int> m2(b, 3);
+  webrtc::MultiValueContainer<int> m3(a, 3);
+  std::string s1, s2, s3;
+  m1.ToString(&s1);
+  m2.ToString(&s2);
+  ASSERT_EQ(s1, "1,2,3,");
+  ASSERT_EQ(s2, "4,5,6,");
+  m1 = m2;
+  m1.ToString(&s1);
+  ASSERT_EQ(s1, s2);
+  m3.ToString(&s3);
+  ASSERT_EQ(s3, "1,2,3,");
+}
diff --git a/src/system_wrappers/source/data_log_no_op.cc b/src/system_wrappers/source/data_log_no_op.cc
new file mode 100644
index 0000000..bedc82a
--- /dev/null
+++ b/src/system_wrappers/source/data_log_no_op.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "data_log.h"
+
+#include <string>
+
+namespace webrtc {
+
+int DataLog::CreateLog() {
+  return 0;
+}
+
+void DataLog::ReturnLog() {
+}
+
+std::string DataLog::Combine(const std::string& /*table_name*/, int /*table_id*/) {
+  return std::string();
+}
+
+int DataLog::AddTable(const std::string& /*table_name*/) {
+  return 0;
+}
+
+int DataLog::AddColumn(const std::string& /*table_name*/,
+                       const std::string& /*column_name*/,
+                       int /*multi_value_length*/) {
+  return 0;
+}
+
+int DataLog::NextRow(const std::string& /*table_name*/) {
+  return 0;
+}
+
+DataLogImpl::DataLogImpl() {
+}
+
+DataLogImpl::~DataLogImpl() {
+}
+
+DataLogImpl* DataLogImpl::StaticInstance() {
+  return NULL;
+}
+
+void DataLogImpl::ReturnLog() {
+}
+
+int DataLogImpl::AddTable(const std::string& /*table_name*/) {
+  return 0;
+}
+
+int DataLogImpl::AddColumn(const std::string& /*table_name*/,
+                           const std::string& /*column_name*/,
+                           int /*multi_value_length*/) {
+  return 0;
+}
+
+int DataLogImpl::InsertCell(const std::string& /*table_name*/,
+                            const std::string& /*column_name*/,
+                            const Container* /*value_container*/) {
+  return 0;
+}
+
+int DataLogImpl::NextRow(const std::string& /*table_name*/) {
+  return 0;
+}
+
+void DataLogImpl::Flush() {
+}
+
+bool DataLogImpl::Run(void* /*obj*/) {
+  return true;
+}
+
+void DataLogImpl::Process() {
+}
+
+void DataLogImpl::StopThread() {
+}
+
+}  // namespace webrtc
diff --git a/src/system_wrappers/source/data_log_unittest.cc b/src/system_wrappers/source/data_log_unittest.cc
new file mode 100644
index 0000000..c64ed94
--- /dev/null
+++ b/src/system_wrappers/source/data_log_unittest.cc
@@ -0,0 +1,310 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <map>
+#include <string>
+
+#include "system_wrappers/interface/data_log.h"
+#include "system_wrappers/interface/data_log_c.h"
+#include "system_wrappers/source/data_log_c_helpers_unittest.h"
+#include "gtest/gtest.h"
+
+using ::webrtc::DataLog;
+
+// A class for storing the values expected from a log table column when
+// verifying a log table file.
+struct ExpectedValues {
+ public:
+  ExpectedValues()
+    : values(NULL),
+      multi_value_length(1) {
+  }
+
+  ExpectedValues(std::vector<std::string> expected_values,
+                 int expected_multi_value_length)
+    : values(expected_values),
+      multi_value_length(expected_multi_value_length) {
+  }
+
+  std::vector<std::string> values;
+  int multi_value_length;
+};
+
+typedef std::map<std::string, ExpectedValues> ExpectedValuesMap;
+
+// A static class used for parsing and verifying data log files.
+class DataLogParser {
+ public:
+  // Verifies that the log table stored in the file "log_file" corresponds to
+  // the cells and columns specified in "columns".
+  static int VerifyTable(FILE* log_file, const ExpectedValuesMap& columns) {
+    int row = 0;
+    char line_buffer[kMaxLineLength];
+    char* ret = fgets(line_buffer, kMaxLineLength, log_file);
+    EXPECT_FALSE(ret == NULL);
+    if (ret == NULL)
+      return -1;
+
+    std::string line(line_buffer);  // fgets NUL-terminates; don't include buffer junk.
+    VerifyHeader(line, columns);
+    while (fgets(line_buffer, kMaxLineLength, log_file) != NULL) {
+      line = std::string(line_buffer);
+      size_t line_position = 0;
+
+      for (ExpectedValuesMap::const_iterator it = columns.begin();
+           it != columns.end(); ++it) {
+        std::string str = ParseElement(line, &line_position,
+                                       it->second.multi_value_length);
+        EXPECT_EQ(str, it->second.values[row]);
+        if (str != it->second.values[row])
+          return -1;
+      }
+      ++row;
+    }
+    return 0;
+  }
+
+  // Verifies the table header stored in "line" to correspond with the header
+  // specified in "columns".
+  static int VerifyHeader(const std::string& line,
+                          const ExpectedValuesMap& columns) {
+    size_t line_position = 0;
+    for (ExpectedValuesMap::const_iterator it = columns.begin();
+         it != columns.end(); ++it) {
+      std::string str = ParseElement(line, &line_position,
+                                     it->second.multi_value_length);
+      EXPECT_EQ(str, it->first);
+      if (str != it->first)
+        return -1;
+    }
+    return 0;
+  }
+
+  // Parses out and returns one element from the string "line", which contains
+  // one line read from a log table file. An element can either be a column
+  // header or a cell of a row.
+  static std::string ParseElement(const std::string& line,
+                                  size_t* line_position,
+                                  int multi_value_length) {
+    std::string parsed_cell;
+    parsed_cell = "";
+    for (int i = 0; i < multi_value_length; ++i) {
+      size_t next_separator = line.find(',', *line_position);
+      EXPECT_NE(next_separator, std::string::npos);
+      if (next_separator == std::string::npos)
+        break;
+      parsed_cell += line.substr(*line_position,
+                                 next_separator - *line_position + 1);
+      *line_position = next_separator + 1;
+    }
+    return parsed_cell;
+  }
+
+  // This constant defines the maximum line length the DataLogParser can
+  // parse.
+  enum { kMaxLineLength = 100 };
+};
+
+TEST(TestDataLog, CreateReturnTest) {
+  for (int i = 0; i < 10; ++i)
+    ASSERT_EQ(DataLog::CreateLog(), 0);
+  ASSERT_EQ(DataLog::AddTable(DataLog::Combine("a proper table", 1)), 0);
+  for (int i = 0; i < 10; ++i)
+    DataLog::ReturnLog();
+  ASSERT_LT(DataLog::AddTable(DataLog::Combine("table failure", 1)), 0);
+}
+
+TEST(TestDataLog, VerifyCombineMethod) {
+  EXPECT_EQ(std::string("a proper table_1"),
+            DataLog::Combine("a proper table", 1));
+}
+
+TEST(TestDataLog, VerifySingleTable) {
+  DataLog::CreateLog();
+  DataLog::AddTable(DataLog::Combine("table", 1));
+  DataLog::AddColumn(DataLog::Combine("table", 1), "arrival", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 1), "timestamp", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 1), "size", 5);
+  WebRtc_UWord32 sizes[5] = {1400, 1500, 1600, 1700, 1800};
+  for (int i = 0; i < 10; ++i) {
+    DataLog::InsertCell(DataLog::Combine("table", 1), "arrival",
+                        static_cast<double>(i));
+    DataLog::InsertCell(DataLog::Combine("table", 1), "timestamp",
+                        static_cast<WebRtc_Word64>(4354 + i));
+    DataLog::InsertCell(DataLog::Combine("table", 1), "size", sizes, 5);
+    DataLog::NextRow(DataLog::Combine("table", 1));
+  }
+  DataLog::ReturnLog();
+  // Verify file
+  FILE* table = fopen("table_1.txt", "r");
+  ASSERT_FALSE(table == NULL);
+  // Read the column names and verify with the expected columns.
+  // Note that the columns are written to file in alphabetical order.
+  // Data expected from parsing the file
+  const int kNumberOfRows = 10;
+  std::string string_arrival[kNumberOfRows] = {
+    "0,", "1,", "2,", "3,", "4,",
+    "5,", "6,", "7,", "8,", "9,"
+  };
+  std::string string_timestamp[kNumberOfRows] = {
+    "4354,", "4355,", "4356,", "4357,",
+    "4358,", "4359,", "4360,", "4361,",
+    "4362,", "4363,"
+  };
+  std::string string_sizes = "1400,1500,1600,1700,1800,";
+  ExpectedValuesMap expected;
+  expected["arrival,"] = ExpectedValues(
+                           std::vector<std::string>(string_arrival,
+                                                    string_arrival +
+                                                    kNumberOfRows),
+                           1);
+  expected["size[5],,,,,"] = ExpectedValues(
+                               std::vector<std::string>(10, string_sizes), 5);
+  expected["timestamp,"] = ExpectedValues(
+                             std::vector<std::string>(string_timestamp,
+                                                      string_timestamp +
+                                                      kNumberOfRows),
+                             1);
+  ASSERT_EQ(DataLogParser::VerifyTable(table, expected), 0);
+  fclose(table);
+}
+
+TEST(TestDataLog, VerifyMultipleTables) {
+  DataLog::CreateLog();
+  DataLog::AddTable(DataLog::Combine("table", 2));
+  DataLog::AddTable(DataLog::Combine("table", 3));
+  DataLog::AddColumn(DataLog::Combine("table", 2), "arrival", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 2), "timestamp", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 2), "size", 1);
+  DataLog::AddTable(DataLog::Combine("table", 4));
+  DataLog::AddColumn(DataLog::Combine("table", 3), "timestamp", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 3), "arrival", 1);
+  DataLog::AddColumn(DataLog::Combine("table", 4), "size", 1);
+  for (WebRtc_Word32 i = 0; i < 10; ++i) {
+    DataLog::InsertCell(DataLog::Combine("table", 2), "arrival",
+                        static_cast<WebRtc_Word32>(i));
+    DataLog::InsertCell(DataLog::Combine("table", 2), "timestamp",
+                        static_cast<WebRtc_Word32>(4354 + i));
+    DataLog::InsertCell(DataLog::Combine("table", 2), "size",
+                        static_cast<WebRtc_Word32>(1200 + 10 * i));
+    DataLog::InsertCell(DataLog::Combine("table", 3), "timestamp",
+                        static_cast<WebRtc_Word32>(4354 + i));
+    DataLog::InsertCell(DataLog::Combine("table", 3), "arrival",
+                        static_cast<WebRtc_Word32>(i));
+    DataLog::InsertCell(DataLog::Combine("table", 4), "size",
+                        static_cast<WebRtc_Word32>(1200 + 10 * i));
+    DataLog::NextRow(DataLog::Combine("table", 4));
+    DataLog::NextRow(DataLog::Combine("table", 2));
+    DataLog::NextRow(DataLog::Combine("table", 3));
+  }
+  DataLog::ReturnLog();
+
+  // Data expected from parsing the file
+  const int kNumberOfRows = 10;
+  std::string string_arrival[kNumberOfRows] = {
+    "0,", "1,", "2,", "3,", "4,",
+    "5,", "6,", "7,", "8,", "9,"
+  };
+  std::string string_timestamp[kNumberOfRows] = {
+    "4354,", "4355,", "4356,", "4357,",
+    "4358,", "4359,", "4360,", "4361,",
+    "4362,", "4363,"
+  };
+  std::string string_size[kNumberOfRows] = {
+    "1200,", "1210,", "1220,", "1230,",
+    "1240,", "1250,", "1260,", "1270,",
+    "1280,", "1290,"
+  };
+
+  // Verify table 2
+  {
+    FILE* table = fopen("table_2.txt", "r");
+    ASSERT_FALSE(table == NULL);
+    ExpectedValuesMap expected;
+    expected["arrival,"] = ExpectedValues(
+                             std::vector<std::string>(string_arrival,
+                                                      string_arrival +
+                                                      kNumberOfRows),
+                             1);
+    expected["size,"] = ExpectedValues(
+                          std::vector<std::string>(string_size,
+                                                   string_size + kNumberOfRows),
+                          1);
+    expected["timestamp,"] = ExpectedValues(
+                               std::vector<std::string>(string_timestamp,
+                                                        string_timestamp +
+                                                        kNumberOfRows),
+                               1);
+    ASSERT_EQ(DataLogParser::VerifyTable(table, expected), 0);
+    fclose(table);
+  }
+
+  // Verify table 3
+  {
+    FILE* table = fopen("table_3.txt", "r");
+    ASSERT_FALSE(table == NULL);
+    ExpectedValuesMap expected;
+    expected["arrival,"] = ExpectedValues(
+                             std::vector<std::string>(string_arrival,
+                                                      string_arrival +
+                                                      kNumberOfRows),
+                             1);
+    expected["timestamp,"] = ExpectedValues(
+                             std::vector<std::string>(string_timestamp,
+                                                      string_timestamp +
+                                                      kNumberOfRows),
+                               1);
+    ASSERT_EQ(DataLogParser::VerifyTable(table, expected), 0);
+    fclose(table);
+  }
+
+  // Verify table 4
+  {
+    FILE* table = fopen("table_4.txt", "r");
+    ASSERT_FALSE(table == NULL);
+    ExpectedValuesMap expected;
+    expected["size,"] = ExpectedValues(
+                          std::vector<std::string>(string_size,
+                                                   string_size +
+                                                   kNumberOfRows),
+                          1);
+    ASSERT_EQ(DataLogParser::VerifyTable(table, expected), 0);
+    fclose(table);
+  }
+}
+
+TEST(TestDataLogCWrapper, VerifyCWrapper) {
+  // Simply call all C wrapper log functions through the C helper unittests.
+  // Main purpose is to make sure that the linkage is correct.
+
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestCreateLog());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestCombine());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestAddTable());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestAddColumn());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_int());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_int());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_float());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_float());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_double());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_double());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_int32());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_int32());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_uint32());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_uint32());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertCell_int64());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestInsertArray_int64());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestNextRow());
+  EXPECT_EQ(0, WebRtcDataLogCHelper_TestReturnLog());
+}
diff --git a/src/system_wrappers/source/data_log_unittest_disabled.cc b/src/system_wrappers/source/data_log_unittest_disabled.cc
new file mode 100644
index 0000000..9d630b6
--- /dev/null
+++ b/src/system_wrappers/source/data_log_unittest_disabled.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/interface/data_log.h"
+
+#include <cstdio>
+
+#include "gtest/gtest.h"
+
+using ::webrtc::DataLog;
+
+const char* kDataLogFileName = "table_1.txt";
+
+void PerformLogging(std::string table_name) {
+  // Simulate normal DataTable logging behavior using this table name.
+  ASSERT_EQ(0, DataLog::AddTable(table_name));
+  ASSERT_EQ(0, DataLog::AddColumn(table_name, "test", 1));
+  for (int i = 0; i < 10; ++i) {
+    // TODO(kjellander): Check InsertCell result when the DataLog dummy is
+    // fixed.
+    DataLog::InsertCell(table_name, "test", static_cast<double>(i));
+    ASSERT_EQ(0, DataLog::NextRow(table_name));
+  }
+}
+
+// Simple test to verify DataLog is still working when the GYP variable
+// enable_data_logging==0 (the default case).
+TEST(TestDataLogDisabled, VerifyLoggingWorks) {
+  ASSERT_EQ(0, DataLog::CreateLog());
+  // Generate a table_name name and assure it's an empty string
+  // (dummy behavior).
+  std::string table_name = DataLog::Combine("table", 1);
+  ASSERT_EQ("", table_name);
+  PerformLogging(table_name);
+  DataLog::ReturnLog();
+}
+
+TEST(TestDataLogDisabled, EnsureNoFileIsWritten) {
+  // Remove any previous data files on disk:
+  std::remove(kDataLogFileName);
+  ASSERT_EQ(0, DataLog::CreateLog());
+  // Don't use the table name we would get from Combine on a disabled DataLog.
+  // Use "table_1" instead (which is what an enabled DataLog would give us).
+  PerformLogging("table_1");
+  DataLog::ReturnLog();
+  // Verify no data log file have been written:
+  ASSERT_EQ(NULL, fopen(kDataLogFileName, "r"));
+}
diff --git a/src/system_wrappers/source/event.cc b/src/system_wrappers/source/event.cc
index 384b961..608cd53 100644
--- a/src/system_wrappers/source/event.cc
+++ b/src/system_wrappers/source/event.cc
@@ -12,10 +12,14 @@
 
 #if defined(_WIN32)
     #include <windows.h>
-    #include "event_windows.h"
+    #include "event_win.h"
+#elif defined(WEBRTC_MAC_INTEL)
+    #include <ApplicationServices/ApplicationServices.h>
+    #include <pthread.h>
+    #include "event_posix.h"
 #else
     #include <pthread.h>
-    #include "event_linux.h"
+    #include "event_posix.h"
 #endif
 
 namespace webrtc {
@@ -24,7 +28,7 @@
 #if defined(_WIN32)
     return new EventWindows();
 #else
-    return EventLinux::Create();
+    return EventPosix::Create();
 #endif
 }
 
@@ -45,6 +49,21 @@
     {
         return 0;
     }
+#elif defined(WEBRTC_MAC_INTEL)
+    bool keyDown = false;
+    // loop through all Mac virtual key constant values
+    for(int keyIndex = 0; keyIndex <= 0x5C; keyIndex++) 
+    {
+        keyDown |= CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, keyIndex);
+    }
+    if(keyDown)
+    {
+        return 1;
+    }
+    else
+    {
+        return 0;
+    } 
 #else
     return -1;
 #endif
diff --git a/src/system_wrappers/source/event_linux.cc b/src/system_wrappers/source/event_posix.cc
similarity index 90%
rename from src/system_wrappers/source/event_linux.cc
rename to src/system_wrappers/source/event_posix.cc
index dddd31c..b77b902 100644
--- a/src/system_wrappers/source/event_linux.cc
+++ b/src/system_wrappers/source/event_posix.cc
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "event_linux.h"
+#include "event_posix.h"
 
 #include <errno.h>
 #include <pthread.h>
@@ -22,9 +22,9 @@
 const long int E6 = 1000000;
 const long int E9 = 1000 * E6;
 
-EventWrapper* EventLinux::Create()
+EventWrapper* EventPosix::Create()
 {
-    EventLinux* ptr = new EventLinux;
+    EventPosix* ptr = new EventPosix;
     if (!ptr)
     {
         return NULL;
@@ -40,7 +40,7 @@
 }
 
 
-EventLinux::EventLinux()
+EventPosix::EventPosix()
     : _timerThread(0),
       _timerEvent(0),
       _periodic(false),
@@ -50,7 +50,7 @@
 {
 }
 
-int EventLinux::Construct()
+int EventPosix::Construct()
 {
     // Set start time to zero
     memset(&_tCreate, 0, sizeof(_tCreate));
@@ -92,14 +92,14 @@
     return 0;
 }
 
-EventLinux::~EventLinux()
+EventPosix::~EventPosix()
 {
     StopTimer();
     pthread_cond_destroy(&cond);
     pthread_mutex_destroy(&mutex);
 }
 
-bool EventLinux::Reset()
+bool EventPosix::Reset()
 {
     if (0 != pthread_mutex_lock(&mutex))
     {
@@ -110,7 +110,7 @@
     return true;
 }
 
-bool EventLinux::Set()
+bool EventPosix::Set()
 {
     if (0 != pthread_mutex_lock(&mutex))
     {
@@ -123,7 +123,7 @@
     return true;
 }
 
-EventTypeWrapper EventLinux::Wait(unsigned long timeout)
+EventTypeWrapper EventPosix::Wait(unsigned long timeout)
 {
     int retVal = 0;
     if (0 != pthread_mutex_lock(&mutex))
@@ -178,7 +178,7 @@
     }
 }
 
-EventTypeWrapper EventLinux::Wait(timespec& tPulse)
+EventTypeWrapper EventPosix::Wait(timespec& tPulse)
 {
     int retVal = 0;
     if (0 != pthread_mutex_lock(&mutex))
@@ -205,7 +205,7 @@
     }
 }
 
-bool EventLinux::StartTimer(bool periodic, unsigned long time)
+bool EventPosix::StartTimer(bool periodic, unsigned long time)
 {
     if (_timerThread)
     {
@@ -223,7 +223,7 @@
     }
 
     // Start the timer thread
-    _timerEvent = static_cast<EventLinux*>(EventWrapper::Create());
+    _timerEvent = static_cast<EventPosix*>(EventWrapper::Create());
     const char* threadName = "WebRtc_event_timer_thread";
     _timerThread = ThreadWrapper::CreateThread(Run, this, kRealtimePriority,
                                                threadName);
@@ -237,12 +237,12 @@
     return false;
 }
 
-bool EventLinux::Run(ThreadObj obj)
+bool EventPosix::Run(ThreadObj obj)
 {
-    return static_cast<EventLinux*>(obj)->Process();
+    return static_cast<EventPosix*>(obj)->Process();
 }
 
-bool EventLinux::Process()
+bool EventPosix::Process()
 {
     if (_tCreate.tv_sec == 0)
     {
@@ -290,7 +290,7 @@
     return true;
 }
 
-bool EventLinux::StopTimer()
+bool EventPosix::StopTimer()
 {
     if(_timerThread)
     {
diff --git a/src/system_wrappers/source/event_linux.h b/src/system_wrappers/source/event_posix.h
similarity index 81%
rename from src/system_wrappers/source/event_linux.h
rename to src/system_wrappers/source/event_posix.h
index 17d193f..0e5893b 100644
--- a/src/system_wrappers/source/event_linux.h
+++ b/src/system_wrappers/source/event_posix.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
 
 #include "event_wrapper.h"
 
@@ -25,12 +25,12 @@
     kDown = 2
 };
 
-class EventLinux : public EventWrapper
+class EventPosix : public EventWrapper
 {
 public:
     static EventWrapper* Create();
 
-    virtual ~EventLinux();
+    virtual ~EventPosix();
 
     virtual EventTypeWrapper Wait(unsigned long maxTime);
     virtual bool Set();
@@ -40,7 +40,7 @@
     virtual bool StopTimer();
 
 private:
-    EventLinux();
+    EventPosix();
     int Construct();
 
     static bool Run(ThreadObj obj);
@@ -53,7 +53,7 @@
     pthread_mutex_t mutex;
 
     ThreadWrapper* _timerThread;
-    EventLinux*    _timerEvent;
+    EventPosix*    _timerEvent;
     timespec       _tCreate;
 
     bool          _periodic;
@@ -63,4 +63,4 @@
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
diff --git a/src/system_wrappers/source/file_impl.cc b/src/system_wrappers/source/file_impl.cc
index 6046c2c..d163bf6 100644
--- a/src/system_wrappers/source/file_impl.cc
+++ b/src/system_wrappers/source/file_impl.cc
@@ -10,16 +10,17 @@
 
 #include "file_impl.h"
 
-#include <cassert>
+#include <assert.h>
 
 #ifdef _WIN32
-    #include <Windows.h>
+#include <Windows.h>
 #else
-    #include <stdarg.h>
-    #include <string.h>
+#include <stdarg.h>
+#include <string.h>
 #endif
 
 namespace webrtc {
+
 FileWrapper* FileWrapper::Create()
 {
     return new FileWrapperImpl();
@@ -30,8 +31,7 @@
       _open(false),
       _looping(false),
       _readOnly(false),
-      _text(false),
-      _maxSizeInBytes(-1),
+      _maxSizeInBytes(0),
       _sizeInBytes(0)
 {
     memset(_fileNameUTF8, 0, kMaxFileNameSize);
@@ -45,7 +45,7 @@
     }
 }
 
-WebRtc_Word32 FileWrapperImpl::CloseFile()
+int FileWrapperImpl::CloseFile()
 {
     if (_id != NULL)
     {
@@ -70,13 +70,13 @@
     return -1;
 }
 
-WebRtc_Word32 FileWrapperImpl::SetMaxFileSize(WebRtc_Word32 bytes)
+int FileWrapperImpl::SetMaxFileSize(size_t bytes)
 {
     _maxSizeInBytes = bytes;
     return 0;
 }
 
-WebRtc_Word32 FileWrapperImpl::Flush()
+int FileWrapperImpl::Flush()
 {
     if (_id != NULL)
     {
@@ -85,40 +85,39 @@
     return -1;
 }
 
-WebRtc_Word32 FileWrapperImpl::FileName(WebRtc_Word8* fileNameUTF8,
-                                        WebRtc_UWord32 size) const
+int FileWrapperImpl::FileName(char* fileNameUTF8,
+                              size_t size) const
 {
-    WebRtc_Word32 len = static_cast<WebRtc_Word32>(strlen(_fileNameUTF8));
-    if(len > kMaxFileNameSize)
+    size_t length = strlen(_fileNameUTF8);
+    if(length > kMaxFileNameSize)
     {
         assert(false);
         return -1;
     }
-    if(len < 1)
+    if(length < 1)
     {
         return -1;
     }
+
     // Make sure to NULL terminate
-    if(size < (WebRtc_UWord32)len)
+    if(size < length)
     {
-        len = size - 1;
+        length = size - 1;
     }
-    memcpy(fileNameUTF8, _fileNameUTF8, len);
-    fileNameUTF8[len] = 0;
+    memcpy(fileNameUTF8, _fileNameUTF8, length);
+    fileNameUTF8[length] = 0;
     return 0;
 }
 
-bool
-FileWrapperImpl::Open() const
+bool FileWrapperImpl::Open() const
 {
     return _open;
 }
 
-WebRtc_Word32 FileWrapperImpl::OpenFile(const WebRtc_Word8 *fileNameUTF8,
-                                        const bool readOnly, const bool loop,
-                                        const bool text)
+int FileWrapperImpl::OpenFile(const char *fileNameUTF8, bool readOnly,
+                              bool loop, bool text)
 {
-    WebRtc_Word32 length = (WebRtc_Word32)strlen(fileNameUTF8);
+    size_t length = strlen(fileNameUTF8);
     if (length > kMaxFileNameSize)
     {
         return -1;
@@ -174,7 +173,7 @@
 
     if (tmpId != NULL)
     {
-        // + 1 comes fro copying the NULL termination charachter too
+        // +1 comes from copying the NULL termination character.
         memcpy(_fileNameUTF8, fileNameUTF8, length + 1);
         if (_id != NULL)
         {
@@ -188,80 +187,79 @@
     return -1;
 }
 
-int FileWrapperImpl::Read(void *buf, int len)
+int FileWrapperImpl::Read(void* buf, int length)
 {
-    if(len < 0)
-    {
-        return 0;
-    }
-    if (_id != NULL)
-    {
-        WebRtc_Word32 res = static_cast<WebRtc_Word32>(fread(buf, 1, len, _id));
-        if (res != len)
-        {
-            if(!_looping)
-            {
-                CloseFile();
-            }
-        }
-        return res;
-    }
-    return -1;
-}
-
-WebRtc_Word32 FileWrapperImpl::WriteText(const WebRtc_Word8* text, ...)
-{
-    assert(!_readOnly);
-    assert(!_text);
+    if (length < 0)
+        return -1;
 
     if (_id == NULL)
+        return -1;
+
+    int bytes_read = static_cast<int>(fread(buf, 1, length, _id));
+    if (bytes_read != length && !_looping)
     {
+        CloseFile();
+    }
+    return bytes_read;
+}
+
+int FileWrapperImpl::WriteText(const char* format, ...)
+{
+    if (format == NULL)
+        return -1;
+
+    if (_readOnly)
+        return -1;
+
+    if (_id == NULL)
+        return -1;
+
+    va_list args;
+    va_start(args, format);
+    int num_chars = vfprintf(_id, format, args);
+    va_end(args);
+
+    if (num_chars >= 0)
+    {
+        return num_chars;
+    }
+    else
+    {
+        CloseFile();
         return -1;
     }
-
-    char tempBuff[kFileMaxTextMessageSize];
-    if (text)
-    {
-        va_list args;
-        va_start(args, text);
-#ifdef _WIN32
-        _vsnprintf(tempBuff, kFileMaxTextMessageSize-1, text, args);
-#else
-        vsnprintf(tempBuff, kFileMaxTextMessageSize-1, text, args);
-#endif
-        va_end(args);
-        WebRtc_Word32 nBytes;
-        nBytes = fprintf(_id, "%s", tempBuff);
-        if (nBytes > 0)
-        {
-            return 0;
-        }
-        CloseFile();
-    }
-    return -1;
 }
 
-bool FileWrapperImpl::Write(const void* buf, int len)
+bool FileWrapperImpl::Write(const void* buf, int length)
 {
-    assert(!_readOnly);
-    if (_id != NULL)
-    {
-        // Check if it's time to stop writing.
-        if ((_maxSizeInBytes != -1) &&
-             _sizeInBytes + len > (WebRtc_UWord32)_maxSizeInBytes)
-        {
-            Flush();
-            return false;
-        }
+    if (buf == NULL)
+        return false;
 
-        size_t nBytes = fwrite((WebRtc_UWord8*)buf, 1, len, _id);
-        if (nBytes > 0)
-        {
-            _sizeInBytes += static_cast<WebRtc_Word32>(nBytes);
-            return true;
-        }
-        CloseFile();
+    if (length < 0)
+        return false;
+
+    if (_readOnly)
+        return false;
+
+    if (_id == NULL)
+        return false;
+
+    // Check if it's time to stop writing.
+    if (_maxSizeInBytes > 0 && (_sizeInBytes + length) > _maxSizeInBytes)
+    {
+        Flush();
+        return false;
     }
+
+    size_t num_bytes = fwrite(buf, 1, length, _id);
+    if (num_bytes > 0)
+    {
+        _sizeInBytes += num_bytes;
+        return true;
+    }
+
+    CloseFile();
     return false;
 }
+
 } // namespace webrtc
diff --git a/src/system_wrappers/source/file_impl.h b/src/system_wrappers/source/file_impl.h
index cf6b734..31ab31e 100644
--- a/src/system_wrappers/source/file_impl.h
+++ b/src/system_wrappers/source/file_impl.h
@@ -16,42 +16,42 @@
 #include <stdio.h>
 
 namespace webrtc {
+
 class FileWrapperImpl : public FileWrapper
 {
 public:
     FileWrapperImpl();
     virtual ~FileWrapperImpl();
 
-    virtual WebRtc_Word32 FileName(WebRtc_Word8* fileNameUTF8,
-                                   WebRtc_UWord32 size) const;
+    virtual int FileName(char* fileNameUTF8,
+                         size_t size) const;
 
     virtual bool Open() const;
 
-    virtual WebRtc_Word32 OpenFile(const WebRtc_Word8* fileNameUTF8,
-                                 const bool readOnly,
-                                 const bool loop = false,
-                                 const bool text = false);
+    virtual int OpenFile(const char* fileNameUTF8,
+                         bool readOnly,
+                         bool loop = false,
+                         bool text = false);
 
-    virtual WebRtc_Word32 CloseFile();
-    virtual WebRtc_Word32 SetMaxFileSize(WebRtc_Word32 bytes);
-    virtual WebRtc_Word32 Flush();
+    virtual int CloseFile();
+    virtual int SetMaxFileSize(size_t bytes);
+    virtual int Flush();
 
-    virtual int Read(void* buf, int len);
-    virtual bool Write(const void *buf, int len);
+    virtual int Read(void* buf, int length);
+    virtual bool Write(const void *buf, int length);
+    virtual int WriteText(const char* format, ...);
     virtual int Rewind();
 
-    virtual WebRtc_Word32 WriteText(const WebRtc_Word8* text, ...);
-
 private:
-    FILE*          _id;
-    bool           _open;
-    bool           _looping;
-    bool           _readOnly;
-    bool           _text;
-    WebRtc_Word32  _maxSizeInBytes; // -1 indicates file size limitation is off
-    WebRtc_UWord32 _sizeInBytes;
-    WebRtc_Word8   _fileNameUTF8[kMaxFileNameSize];
+    FILE* _id;
+    bool _open;
+    bool _looping;
+    bool _readOnly;
+    size_t _maxSizeInBytes; // 0 indicates file size limitation is off
+    size_t _sizeInBytes;
+    char _fileNameUTF8[kMaxFileNameSize];
 };
+
 } // namespace webrtc
 
 #endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_FILE_IMPL_H_
diff --git a/src/system_wrappers/source/list_no_stl.cc b/src/system_wrappers/source/list_no_stl.cc
index d45f27b..dbba571 100644
--- a/src/system_wrappers/source/list_no_stl.cc
+++ b/src/system_wrappers/source/list_no_stl.cc
@@ -79,7 +79,7 @@
 int ListWrapper::PushBack(const void* ptr)
 {
     ListItem* item = new ListItem(ptr);
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     PushBackImpl(item);
     return 0;
 }
@@ -87,7 +87,7 @@
 int ListWrapper::PushBack(const unsigned int item_id)
 {
     ListItem* item = new ListItem(item_id);
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     PushBackImpl(item);
     return 0;
 }
@@ -95,7 +95,7 @@
 int ListWrapper::PushFront(const unsigned int item_id)
 {
     ListItem* item = new ListItem(item_id);
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     PushFrontImpl(item);
     return 0;
 }
@@ -103,7 +103,7 @@
 int ListWrapper::PushFront(const void* ptr)
 {
     ListItem* item = new ListItem(ptr);
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     PushFrontImpl(item);
     return 0;
 }
@@ -159,7 +159,7 @@
     {
         return -1;
     }
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     if (!existing_previous_item)
     {
         PushBackImpl(new_item);
@@ -195,7 +195,7 @@
     {
         return -1;
     }
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     if (!existing_next_item)
     {
         PushBackImpl(new_item);
diff --git a/src/system_wrappers/source/list_unittest.cc b/src/system_wrappers/source/list_unittest.cc
index 3f3c88f..4d32f59 100644
--- a/src/system_wrappers/source/list_unittest.cc
+++ b/src/system_wrappers/source/list_unittest.cc
@@ -10,10 +10,12 @@
 
 #include "gtest/gtest.h"
 
-#include "list_wrapper.h"
+#include "system_wrappers/interface/list_wrapper.h"
+#include "system_wrappers/interface/scoped_ptr.h"
 
 using ::webrtc::ListWrapper;
 using ::webrtc::ListItem;
+using ::webrtc::scoped_ptr;
 
 // Note: kNumberOfElements needs to be even.
 const unsigned int kNumberOfElements = 10;
@@ -38,8 +40,6 @@
     virtual unsigned int GetUnsignedItem(
         const ListItem* item) const = 0;
     virtual ListItem* CreateListItem(unsigned int item_id) = 0;
-    virtual bool DestroyListItem(ListItem* item) = 0;
-
     unsigned int GetSize() const {
         return list_.GetSize();
     }
@@ -64,24 +64,52 @@
     }
     virtual int Erase(ListItem* item) = 0;
     int Insert(ListItem* existing_previous_item,
-                       ListItem* new_item) {
-        return list_.Insert(existing_previous_item, new_item);
+               ListItem* new_item) {
+        const int retval = list_.Insert(existing_previous_item, new_item);
+        if (retval != 0) {
+            EXPECT_TRUE(DestroyListItem(new_item));
+        }
+        return retval;
     }
 
     int InsertBefore(ListItem* existing_next_item,
-                             ListItem* new_item) {
-        return list_.InsertBefore(existing_next_item, new_item);
+                     ListItem* new_item) {
+        const int retval = list_.InsertBefore(existing_next_item, new_item);
+        if (retval != 0) {
+            EXPECT_TRUE(DestroyListItem(new_item));
+        }
+        return retval;
     }
 protected:
     ListWrapperSimple() {}
 
+    virtual bool DestroyListItemContent(ListItem* item) = 0;
+    bool DestroyListItem(ListItem* item) {
+        const bool retval = DestroyListItemContent(item);
+        delete item;
+        return retval;
+    }
+
     ListWrapper list_;
 };
 
+void ClearList(ListWrapperSimple* list_wrapper) {
+  if (list_wrapper == NULL) {
+      return;
+  }
+  ListItem* list_item = list_wrapper->First();
+  while (list_item != NULL) {
+    EXPECT_EQ(list_wrapper->Erase(list_item), 0);
+    list_item = list_wrapper->First();
+  }
+}
+
 class ListWrapperStatic : public ListWrapperSimple {
 public:
     ListWrapperStatic() {}
-    virtual ~ListWrapperStatic() {}
+    virtual ~ListWrapperStatic() {
+        ClearList(this);
+    }
 
     virtual unsigned int GetUnsignedItem(const ListItem* item) const {
         return item->GetUnsignedItem();
@@ -89,11 +117,7 @@
     virtual ListItem* CreateListItem(unsigned int item_id) {
         return new ListItem(item_id);
     }
-    virtual bool DestroyListItem(ListItem* item) {
-        if (item == NULL) {
-            return false;
-        }
-        delete item;
+    virtual bool DestroyListItemContent(ListItem* item) {
         return true;
     }
     virtual int PushBack(const unsigned int item_id) {
@@ -116,7 +140,9 @@
 class ListWrapperDynamic : public ListWrapperSimple {
 public:
     ListWrapperDynamic() {}
-    virtual ~ListWrapperDynamic() {}
+    virtual ~ListWrapperDynamic() {
+        ClearList(this);
+    }
 
     virtual unsigned int GetUnsignedItem(const ListItem* item) const {
         const unsigned int* return_value_pointer =
@@ -140,7 +166,7 @@
         }
         return return_value;
     }
-    virtual bool DestroyListItem(ListItem* item) {
+    virtual bool DestroyListItemContent(ListItem* item) {
         if (item == NULL) {
             return false;
         }
@@ -151,7 +177,6 @@
             return_value = true;
             delete item_id_ptr;
         }
-        delete item;
         return return_value;
     }
     virtual int PushBack(const unsigned int item_id) {
@@ -190,17 +215,15 @@
         if (item == NULL) {
             return -1;
         }
-        unsigned int* item_id_ptr = reinterpret_cast<unsigned int*> (
-            item->GetItem());
-        int return_value = -1;
-        if (item_id_ptr != NULL) {
-            delete item_id_ptr;
-            return_value = 0;
+        int retval = 0;
+        if (!DestroyListItemContent(item)) {
+            retval = -1;
+            ADD_FAILURE();
         }
         if (list_.Erase(item) != 0) {
-            return -1;
+            retval = -1;
         }
-        return return_value;
+        return retval;
     }
 };
 
@@ -212,15 +235,6 @@
     return new ListWrapperDynamic();
 }
 
-void ClearList(ListWrapperSimple* list) {
-    if (list == NULL)
-    {
-        return;
-    }
-    while (list->Erase(list->First()) == 0) {
-    }
-}
-
 ListWrapperSimple* CreateAscendingList(bool static_allocation) {
     ListWrapperSimple* return_value = ListWrapperSimple::Create(
         static_allocation);
@@ -237,7 +251,7 @@
     return return_value;
 }
 
-ListWrapperSimple* CreateDecendingList(bool static_allocation) {
+ListWrapperSimple* CreateDescendingList(bool static_allocation) {
     ListWrapperSimple* return_value = ListWrapperSimple::Create(
         static_allocation);
     if (return_value == NULL) {
@@ -323,17 +337,20 @@
 TEST(ListWrapperTest,ReverseNewIntList) {
     // Create a new temporary list with elements reversed those of
     // new_int_list_
-    const ListWrapperSimple* decending_list = CreateDecendingList(rand()%2);
-    ASSERT_FALSE(decending_list == NULL);
-    ASSERT_FALSE(decending_list->Empty());
-    ASSERT_EQ(kNumberOfElements,decending_list->GetSize());
+    const scoped_ptr<ListWrapperSimple> descending_list(
+        CreateDescendingList(rand()%2));
+    ASSERT_FALSE(descending_list.get() == NULL);
+    ASSERT_FALSE(descending_list->Empty());
+    ASSERT_EQ(kNumberOfElements,descending_list->GetSize());
 
-    const ListWrapperSimple* ascending_list = CreateAscendingList(rand()%2);
-    ASSERT_FALSE(ascending_list == NULL);
+    const scoped_ptr<ListWrapperSimple> ascending_list(
+        CreateAscendingList(rand()%2));
+    ASSERT_FALSE(ascending_list.get() == NULL);
     ASSERT_FALSE(ascending_list->Empty());
     ASSERT_EQ(kNumberOfElements,ascending_list->GetSize());
 
-    ListWrapperSimple* list_to_reverse = ListWrapperSimple::Create(rand()%2);
+    scoped_ptr<ListWrapperSimple> list_to_reverse(
+        ListWrapperSimple::Create(rand()%2));
 
     // Reverse the list using PushBack and Previous.
     for (ListItem* item = ascending_list->Last(); item != NULL;
@@ -341,98 +358,97 @@
          list_to_reverse->PushBack(ascending_list->GetUnsignedItem(item));
     }
 
-    ASSERT_TRUE(CompareLists(decending_list,list_to_reverse));
+    ASSERT_TRUE(CompareLists(descending_list.get(), list_to_reverse.get()));
 
-    ListWrapperSimple* list_to_un_reverse =
-        ListWrapperSimple::Create(rand()%2);
-    ASSERT_FALSE(list_to_un_reverse == NULL);
+    scoped_ptr<ListWrapperSimple> list_to_un_reverse(
+        ListWrapperSimple::Create(rand()%2));
+    ASSERT_FALSE(list_to_un_reverse.get() == NULL);
     // Reverse the reversed list using PushFront and Next.
     for (ListItem* item = list_to_reverse->First(); item != NULL;
          item = list_to_reverse->Next(item)) {
          list_to_un_reverse->PushFront(list_to_reverse->GetUnsignedItem(item));
     }
-
-    ASSERT_TRUE(CompareLists(ascending_list,list_to_un_reverse));
+    ASSERT_TRUE(CompareLists(ascending_list.get(), list_to_un_reverse.get()));
 }
 
 TEST(ListWrapperTest,PopTest) {
-    ListWrapperSimple* ascending_list = CreateAscendingList(rand()%2);
-    ASSERT_FALSE(ascending_list == NULL);
+    scoped_ptr<ListWrapperSimple> ascending_list(CreateAscendingList(rand()%2));
+    ASSERT_FALSE(ascending_list.get() == NULL);
     ASSERT_FALSE(ascending_list->Empty());
-    EXPECT_EQ(0,ascending_list->PopFront());
-    EXPECT_EQ(1,ascending_list->GetUnsignedItem(ascending_list->First()));
-    
-    EXPECT_EQ(0,ascending_list->PopBack());
-    EXPECT_EQ(kNumberOfElements - 2,ascending_list->GetUnsignedItem(
+    EXPECT_EQ(0, ascending_list->PopFront());
+    EXPECT_EQ(1U, ascending_list->GetUnsignedItem(ascending_list->First()));
+
+    EXPECT_EQ(0, ascending_list->PopBack());
+    EXPECT_EQ(kNumberOfElements - 2, ascending_list->GetUnsignedItem(
               ascending_list->Last()));
     EXPECT_EQ(kNumberOfElements - 2, ascending_list->GetSize());
 }
 
 // Use Insert to interleave two lists.
 TEST(ListWrapperTest,InterLeaveTest) {
-    ListWrapperSimple* interleave_list = CreateAscendingList(rand()%2);
-    ASSERT_FALSE(interleave_list == NULL);
+    scoped_ptr<ListWrapperSimple> interleave_list(
+        CreateAscendingList(rand()%2));
+    ASSERT_FALSE(interleave_list.get() == NULL);
     ASSERT_FALSE(interleave_list->Empty());
 
-    ListWrapperSimple* decending_list = CreateDecendingList(rand()%2);
-    ASSERT_FALSE(decending_list == NULL);
+    scoped_ptr<ListWrapperSimple> descending_list(
+        CreateDescendingList(rand()%2));
+    ASSERT_FALSE(descending_list.get() == NULL);
 
-    for (int i = 0; i < kNumberOfElements/2; ++i) {
-        ASSERT_EQ(0,interleave_list->PopBack());
-        ASSERT_EQ(0,decending_list->PopBack());
+    for (unsigned int i = 0; i < kNumberOfElements/2; ++i) {
+        ASSERT_EQ(0, interleave_list->PopBack());
+        ASSERT_EQ(0, descending_list->PopBack());
     }
-    ASSERT_EQ(kNumberOfElements/2,interleave_list->GetSize());
-    ASSERT_EQ(kNumberOfElements/2,decending_list->GetSize());
+    ASSERT_EQ(kNumberOfElements/2, interleave_list->GetSize());
+    ASSERT_EQ(kNumberOfElements/2, descending_list->GetSize());
 
-    int insert_position = kNumberOfElements/2;
+    unsigned int insert_position = kNumberOfElements/2;
     ASSERT_EQ(insert_position * 2, kNumberOfElements);
-    while (!decending_list->Empty())
+    while (!descending_list->Empty())
     {
-        ListItem* item = decending_list->Last();
+        ListItem* item = descending_list->Last();
         ASSERT_FALSE(item == NULL);
 
-        const unsigned int item_id = decending_list->GetUnsignedItem(item);
-        ASSERT_EQ(0,decending_list->Erase(item));
+        const unsigned int item_id = descending_list->GetUnsignedItem(item);
+        ASSERT_EQ(0, descending_list->Erase(item));
 
         ListItem* insert_item = interleave_list->CreateListItem(item_id);
         ASSERT_FALSE(insert_item == NULL);
         item = interleave_list->First();
         ASSERT_FALSE(item == NULL);
-        for (int j = 0; j < insert_position - 1; ++j) {
+        for (unsigned int j = 0; j < insert_position - 1; ++j) {
             item = interleave_list->Next(item);
             ASSERT_FALSE(item == NULL);
         }
-        if (0 != interleave_list->Insert(item,insert_item)) {
-            interleave_list->DestroyListItem(insert_item);
-            FAIL();
-        }
+        EXPECT_EQ(0, interleave_list->Insert(item, insert_item));
         --insert_position;
     }
-    
-    ListWrapperSimple* interleaved_list = CreateInterleavedList(rand()%2);
-    ASSERT_FALSE(interleaved_list == NULL);
-    ASSERT_FALSE(interleaved_list->Empty());
 
-    ASSERT_TRUE(CompareLists(interleaved_list,interleave_list));
+    scoped_ptr<ListWrapperSimple> interleaved_list(
+        CreateInterleavedList(rand()%2));
+    ASSERT_FALSE(interleaved_list.get() == NULL);
+    ASSERT_FALSE(interleaved_list->Empty());
+    ASSERT_TRUE(CompareLists(interleaved_list.get(), interleave_list.get()));
 }
 
 // Use InsertBefore to interleave two lists.
 TEST(ListWrapperTest,InterLeaveTestII) {
-    ListWrapperSimple* interleave_list = CreateDecendingList(rand()%2);
-    ASSERT_FALSE(interleave_list == NULL);
+    scoped_ptr<ListWrapperSimple> interleave_list(
+        CreateDescendingList(rand()%2));
+    ASSERT_FALSE(interleave_list.get() == NULL);
     ASSERT_FALSE(interleave_list->Empty());
 
-    ListWrapperSimple* ascending_list = CreateAscendingList(rand()%2);
-    ASSERT_FALSE(ascending_list == NULL);
+    scoped_ptr<ListWrapperSimple> ascending_list(CreateAscendingList(rand()%2));
+    ASSERT_FALSE(ascending_list.get() == NULL);
 
-    for (int i = 0; i < kNumberOfElements/2; ++i) {
-        ASSERT_EQ(0,interleave_list->PopBack());
-        ASSERT_EQ(0,ascending_list->PopBack());
+    for (unsigned int i = 0; i < kNumberOfElements/2; ++i) {
+        ASSERT_EQ(0, interleave_list->PopBack());
+        ASSERT_EQ(0, ascending_list->PopBack());
     }
-    ASSERT_EQ(kNumberOfElements/2,interleave_list->GetSize());
-    ASSERT_EQ(kNumberOfElements/2,ascending_list->GetSize());
+    ASSERT_EQ(kNumberOfElements/2, interleave_list->GetSize());
+    ASSERT_EQ(kNumberOfElements/2, ascending_list->GetSize());
 
-    int insert_position = kNumberOfElements/2;
+    unsigned int insert_position = kNumberOfElements/2;
     ASSERT_EQ(insert_position * 2, kNumberOfElements);
     while (!ascending_list->Empty())
     {
@@ -446,30 +462,18 @@
         ASSERT_FALSE(insert_item == NULL);
         item = interleave_list->First();
         ASSERT_FALSE(item == NULL);
-        for (int j = 0; j < insert_position - 1; ++j) {
+        for (unsigned int j = 0; j < insert_position - 1; ++j) {
             item = interleave_list->Next(item);
             ASSERT_FALSE(item == NULL);
         }
-        if (0 != interleave_list->InsertBefore(item,insert_item)) {
-            interleave_list->DestroyListItem(insert_item);
-            FAIL();
-        }
+        EXPECT_EQ(interleave_list->InsertBefore(item, insert_item), 0);
         --insert_position;
     }
 
-    ListWrapperSimple* interleaved_list = CreateInterleavedList(rand()%2);
-    ASSERT_FALSE(interleaved_list == NULL);
+    scoped_ptr<ListWrapperSimple> interleaved_list(
+        CreateInterleavedList(rand()%2));
+    ASSERT_FALSE(interleaved_list.get() == NULL);
     ASSERT_FALSE(interleaved_list->Empty());
 
-    ASSERT_TRUE(CompareLists(interleaved_list,interleave_list));
-}
-
-int main(int argc, char **argv)
-{
-    ::testing::InitGoogleTest(&argc, argv);
-    // Added return_value so that it's convenient to put a breakpoint before
-    // exiting please note that the return value from RUN_ALL_TESTS() must
-    // be returned by the main function.
-    const int return_value = RUN_ALL_TESTS();
-    return return_value;
+    ASSERT_TRUE(CompareLists(interleaved_list.get(), interleave_list.get()));
 }
diff --git a/src/system_wrappers/source/map.cc b/src/system_wrappers/source/map.cc
index 0bff155..331da32 100644
--- a/src/system_wrappers/source/map.cc
+++ b/src/system_wrappers/source/map.cc
@@ -13,7 +13,7 @@
 #include "trace.h"
 
 namespace webrtc {
-MapItem::MapItem(int id, void* item) : item_pointer_(item), item_id_(id)
+MapItem::MapItem(int id, void* item) : item_id_(id), item_pointer_(item)
 {
 }
 
diff --git a/src/system_wrappers/source/map_no_stl.cc b/src/system_wrappers/source/map_no_stl.cc
index cb0ac00..ef93a1f 100644
--- a/src/system_wrappers/source/map_no_stl.cc
+++ b/src/system_wrappers/source/map_no_stl.cc
@@ -75,7 +75,7 @@
 {
     MapNoStlItem* new_item = new MapNoStlItem(id, ptr);
 
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     MapNoStlItem* item = first_;
     size_++;
     if (!item)
@@ -144,7 +144,7 @@
 
 MapNoStlItem* MapNoStl::Find(int id) const
 {
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     MapNoStlItem* item = Locate(id);
     return item;
 }
@@ -155,13 +155,13 @@
     {
         return -1;
     }
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     return Remove(item);
 }
 
 int MapNoStl::Erase(const int id)
 {
-    CriticalSectionScoped lock(*critical_section_);
+    CriticalSectionScoped lock(critical_section_);
     MapNoStlItem* item = Locate(id);
     if(!item)
     {
diff --git a/src/system_wrappers/source/map_unittest.cc b/src/system_wrappers/source/map_unittest.cc
index 8e8ea07..1c85a92 100644
--- a/src/system_wrappers/source/map_unittest.cc
+++ b/src/system_wrappers/source/map_unittest.cc
@@ -15,7 +15,7 @@
 using ::webrtc::MapWrapper;
 using ::webrtc::MapItem;
 
-const unsigned int kNumberOfElements = 10;
+const int kNumberOfElements = 10;
 
 int* ItemPointer(MapItem* item) {
     if (item == NULL) {
@@ -48,7 +48,7 @@
     if (supress) {
         return;
     }
-    printf(message);
+    printf("%s", message);
 }
 
 bool CreateAscendingMap(MapWrapper* ascending_map) {
diff --git a/src/system_wrappers/source/rw_lock.cc b/src/system_wrappers/source/rw_lock.cc
index 47901d3..b308358 100644
--- a/src/system_wrappers/source/rw_lock.cc
+++ b/src/system_wrappers/source/rw_lock.cc
@@ -13,12 +13,12 @@
 #include <assert.h>
 
 #if defined(_WIN32)
-    #include "rw_lock_windows.h"
-#elif defined(ANDROID)
+    #include "rw_lock_win.h"
+#elif defined(WEBRTC_ANDROID)
     #include <stdlib.h>
     #include "rw_lock_generic.h"
 #else
-    #include "rw_lock_linux.h"
+    #include "rw_lock_posix.h"
 #endif
 
 namespace webrtc {
@@ -26,10 +26,10 @@
 {
 #ifdef _WIN32
     RWLockWrapper* lock =  new RWLockWindows();
-#elif defined(ANDROID)
+#elif defined(WEBRTC_ANDROID)
     RWLockWrapper* lock =  new RWLockWrapperGeneric();
 #else
-    RWLockWrapper* lock =  new RWLockLinux();
+    RWLockWrapper* lock =  new RWLockPosix();
 #endif
     if(lock->Init() != 0)
     {
diff --git a/src/system_wrappers/source/rw_lock_linux.cc b/src/system_wrappers/source/rw_lock_posix.cc
similarity index 72%
rename from src/system_wrappers/source/rw_lock_linux.cc
rename to src/system_wrappers/source/rw_lock_posix.cc
index 084dce8..81a161e 100644
--- a/src/system_wrappers/source/rw_lock_linux.cc
+++ b/src/system_wrappers/source/rw_lock_posix.cc
@@ -8,39 +8,39 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "rw_lock_linux.h"
+#include "rw_lock_posix.h"
 
 namespace webrtc {
-RWLockLinux::RWLockLinux() : _lock()
+RWLockPosix::RWLockPosix() : _lock()
 {
 }
 
-RWLockLinux::~RWLockLinux()
+RWLockPosix::~RWLockPosix()
 {
     pthread_rwlock_destroy(&_lock);
 }
 
-int RWLockLinux::Init()
+int RWLockPosix::Init()
 {
     return pthread_rwlock_init(&_lock, 0);
 }
 
-void RWLockLinux::AcquireLockExclusive()
+void RWLockPosix::AcquireLockExclusive()
 {
     pthread_rwlock_wrlock(&_lock);
 }
 
-void RWLockLinux::ReleaseLockExclusive()
+void RWLockPosix::ReleaseLockExclusive()
 {
     pthread_rwlock_unlock(&_lock);
 }
 
-void RWLockLinux::AcquireLockShared()
+void RWLockPosix::AcquireLockShared()
 {
     pthread_rwlock_rdlock(&_lock);
 }
 
-void RWLockLinux::ReleaseLockShared()
+void RWLockPosix::ReleaseLockShared()
 {
     pthread_rwlock_unlock(&_lock);
 }
diff --git a/src/system_wrappers/source/rw_lock_linux.h b/src/system_wrappers/source/rw_lock_posix.h
similarity index 74%
rename from src/system_wrappers/source/rw_lock_linux.h
rename to src/system_wrappers/source/rw_lock_posix.h
index 391ee8f..929bbb8 100644
--- a/src/system_wrappers/source/rw_lock_linux.h
+++ b/src/system_wrappers/source/rw_lock_posix.h
@@ -8,19 +8,19 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
 
 #include "rw_lock_wrapper.h"
 
 #include <pthread.h>
 
 namespace webrtc {
-class RWLockLinux : public RWLockWrapper
+class RWLockPosix : public RWLockWrapper
 {
 public:
-    RWLockLinux();
-    virtual ~RWLockLinux();
+    RWLockPosix();
+    virtual ~RWLockPosix();
 
     virtual void AcquireLockExclusive();
     virtual void ReleaseLockExclusive();
@@ -36,4 +36,4 @@
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
diff --git a/src/system_wrappers/source/system_wrappers.gyp b/src/system_wrappers/source/system_wrappers.gyp
index 0448941..ce2438f 100644
--- a/src/system_wrappers/source/system_wrappers.gyp
+++ b/src/system_wrappers/source/system_wrappers.gyp
@@ -1,13 +1,13 @@
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# TODO: Rename files to use *_linux.cpp etc. names, to automatically include relevant files. Remove conditions section.
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
 
 {
-  'includes': [
-    '../../common_settings.gypi', # Common settings
-  ],
+  'includes': [ '../../build/common.gypi', ],
   'targets': [
     {
       'target_name': 'system_wrappers',
@@ -25,15 +25,24 @@
         '../interface/aligned_malloc.h',
         '../interface/atomic32_wrapper.h',
         '../interface/condition_variable_wrapper.h',
+        '../interface/cpu_info.h',
         '../interface/cpu_wrapper.h',
         '../interface/cpu_features_wrapper.h',
         '../interface/critical_section_wrapper.h',
+        '../interface/data_log.h',
+        '../interface/data_log_c.h',
+        '../interface/data_log_impl.h',
         '../interface/event_wrapper.h',
         '../interface/file_wrapper.h',
+        '../interface/fix_interlocked_exchange_pointer_win.h',
         '../interface/list_wrapper.h',
         '../interface/map_wrapper.h',
+        '../interface/ref_count.h',
         '../interface/rw_lock_wrapper.h',
+        '../interface/scoped_ptr.h',
+        '../interface/scoped_refptr.h',
         '../interface/sort.h',
+        '../interface/static_instance.h',
         '../interface/thread_wrapper.h',
         '../interface/tick_util.h',
         '../interface/trace.h',
@@ -41,102 +50,134 @@
         'atomic32.cc',
         'atomic32_linux.h',
         'atomic32_mac.h',
-        'atomic32_windows.h',
+        'atomic32_win.h',
         'condition_variable.cc',
-        'condition_variable_linux.h',
-        'condition_variable_windows.h',
+        'condition_variable_posix.cc',
+        'condition_variable_posix.h',
+        'condition_variable_win.cc',
+        'condition_variable_win.h',
         'cpu.cc',
+        'cpu_no_op.cc',
+        'cpu_info.cc',
+        'cpu_linux.cc',
         'cpu_linux.h',
+        'cpu_mac.cc',
         'cpu_mac.h',
-        'cpu_windows.h',
+        'cpu_win.cc',
+        'cpu_win.h',
         'cpu_features.cc',
         'critical_section.cc',
-        'critical_section_linux.h',
-        'critical_section_windows.h',
+        'critical_section_posix.cc',
+        'critical_section_posix.h',
+        'critical_section_win.cc',
+        'critical_section_win.h',
+        'data_log.cc',
+        'data_log_c.cc',
+        'data_log_no_op.cc',
         'event.cc',
-        'event_linux.h',
-        'event_windows.h',
+        'event_posix.cc',
+        'event_posix.h',
+        'event_win.cc',
+        'event_win.h',
         'file_impl.cc',
         'file_impl.h',
         'list_no_stl.cc',
         'map.cc',
         'rw_lock.cc',
-        'rw_lock_linux.h',
-        'rw_lock_windows.h',
+        'rw_lock_posix.cc',
+        'rw_lock_posix.h',
+        'rw_lock_win.cc',
+        'rw_lock_win.h',
         'sort.cc',
         'thread.cc',
-        'thread_linux.h',
-        'thread_windows.h',
+        'thread_posix.cc',
+        'thread_posix.h',
+        'thread_win.cc',
+        'thread_win.h',
+        'set_thread_name_win.h',
         'trace_impl.cc',
         'trace_impl.h',
-        'trace_linux.h',
-        'trace_windows.h',
+        'trace_impl_no_op.cc',
+        'trace_posix.cc',
+        'trace_posix.h',
+        'trace_win.cc',
+        'trace_win.h',
       ],
       'conditions': [
+        ['enable_data_logging==1', {
+          'sources!': [ 'data_log_no_op.cc', ],
+        },{
+          'sources!': [ 'data_log.cc', ],
+        },],
         ['OS=="linux"', {
-          'sources': [
-            'condition_variable_linux.cc',
-            'cpu_linux.cc',
-            'critical_section_linux.cc',
-            'event_linux.cc',
-            'thread_linux.cc',
-            'trace_linux.cc',
-            'rw_lock_linux.cc',
-          ],
           'link_settings': {
-            'libraries': [
-              '-lrt',
-            ],
+            'libraries': [ '-lrt', ],
           },
         }],
         ['OS=="mac"', {
-          'sources': [
-            'condition_variable_linux.cc',
-            'cpu_mac.cc',
-            'critical_section_linux.cc',
-            'event_linux.cc',
-            'rw_lock_linux.cc',
-            'thread_linux.cc',
-            'trace_linux.cc',
-          ],
-        }],
-        ['OS=="win"', {
-          'sources': [
-            'atomic32_windows.h',
-            'condition_variable_windows.cc',
-            'condition_variable_windows.h',
-            'cpu_windows.cc',
-            'cpu_windows.h',
-            'critical_section_windows.cc',
-            'critical_section_windows.h',
-            'event_windows.cc',
-            'event_windows.h',
-            'rw_lock_windows.cc',
-            'rw_lock_windows.h',
-            'thread_windows.cc',
-            'thread_windows.h',
-            'trace_windows.cc',
-            'trace_windows.h',
-          ],
           'link_settings': {
-            'libraries': [
-              '-lwinmm.lib',
-            ],
+            'libraries': [ '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework', ],
           },
         }],
-	    ] # conditions
-    },
-    {
-      'target_name': 'system_wrappersTest',
-      'type': 'executable',
-      'dependencies': [
-        'system_wrappers'
-      ],
-      'sources': [
-        '../test/Test.cpp',
-      ],
+        ['OS=="win"', {
+          'link_settings': {
+            'libraries': [ '-lwinmm.lib', ],
+          },
+        }],
+        ['build_with_chromium==1', {
+          'sources!': [
+            'cpu.cc',
+            'cpu_linux.h',
+            'cpu_mac.h',
+            'cpu_win.h',
+            'trace_impl.cc',
+            'trace_impl.h',
+            'trace_posix.cc',
+            'trace_posix.h',
+            'trace_win.cc',
+            'trace_win.h',
+          ],
+        }, {
+          'sources!': [
+            'cpu_no_op.cc',
+            'trace_impl_no_op.cc',
+          ],
+        }]
+      ] # conditions
     },
   ], # targets
+  'conditions': [
+    ['build_with_chromium==0', {
+      'targets': [
+        {
+          'target_name': 'system_wrappers_unittests',
+          'type': 'executable',
+          'dependencies': [
+            'system_wrappers',
+            '<(webrtc_root)/../testing/gtest.gyp:gtest',
+            '<(webrtc_root)/../test/test.gyp:test_support_main',
+          ],
+          'sources': [
+            'cpu_wrapper_unittest.cc',
+            'list_unittest.cc',
+            'map_unittest.cc',
+            'data_log_unittest.cc',
+            'data_log_unittest_disabled.cc',
+            'data_log_helpers_unittest.cc',
+            'data_log_c_helpers_unittest.c',
+            'data_log_c_helpers_unittest.h',
+          ],
+          'conditions': [
+            ['enable_data_logging==1', {
+              'sources!': [ 'data_log_unittest_disabled.cc', ],
+            }, {
+              'sources!': [ 'data_log_unittest.cc', ],
+            }],
+          ],
+        },
+      ], # targets
+    }], # build_with_chromium
+  ], # conditions
 }
 
 # Local Variables:
diff --git a/src/system_wrappers/source/system_wrappers_tests.gyp b/src/system_wrappers/source/system_wrappers_tests.gyp
deleted file mode 100644
index 856f0c1..0000000
--- a/src/system_wrappers/source/system_wrappers_tests.gyp
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
-  'includes': [
-    '../../common_settings.gypi',
-  ],
-  'targets': [
-    {
-      'target_name': 'unittest',
-      'type': 'executable',
-      'dependencies': [
-        '../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
-        '../../../testing/gtest.gyp:gtest',
-        '../../../testing/gtest.gyp:gtest_main',
-      ],
-      'include_dirs': [
-        '../../../testing/gtest/include',
-      ],
-      'sources': [
-        'list_unittest.cc',
-        'map_unittest.cc',
-      ],
-    },
-  ],
-}
-
-# Local Variables:
-# tab-width:2
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/system_wrappers/source/thread.cc b/src/system_wrappers/source/thread.cc
index a136cba..32dcc63 100644
--- a/src/system_wrappers/source/thread.cc
+++ b/src/system_wrappers/source/thread.cc
@@ -11,9 +11,9 @@
 #include "thread_wrapper.h"
 
 #if defined(_WIN32)
-    #include "thread_windows.h"
+    #include "thread_win.h"
 #else
-    #include "thread_linux.h"
+    #include "thread_posix.h"
 #endif
 
 namespace webrtc {
@@ -24,7 +24,7 @@
 #if defined(_WIN32)
     return new ThreadWindows(func, obj, prio, threadName);
 #else
-    return ThreadLinux::Create(func, obj, prio, threadName);
+    return ThreadPosix::Create(func, obj, prio, threadName);
 #endif
 }
 } // namespace webrtc
diff --git a/src/system_wrappers/source/thread_linux.cc b/src/system_wrappers/source/thread_posix.cc
similarity index 89%
rename from src/system_wrappers/source/thread_linux.cc
rename to src/system_wrappers/source/thread_posix.cc
index 1281c1b..eb0e8f4 100644
--- a/src/system_wrappers/source/thread_linux.cc
+++ b/src/system_wrappers/source/thread_posix.cc
@@ -8,7 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "thread_linux.h"
+#include "thread_posix.h"
 
 #include <errno.h>
 #include <string.h> // strncpy
@@ -30,12 +30,12 @@
 {
     static void* StartThread(void* lpParameter)
     {
-        static_cast<ThreadLinux*>(lpParameter)->Run();
+        static_cast<ThreadPosix*>(lpParameter)->Run();
         return 0;
     }
 }
 
-#if (defined(WEBRTC_LINUX) && !defined(ANDROID))
+#if (defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID))
 static pid_t gettid()
 {
 #if defined(__NR_gettid)
@@ -46,10 +46,10 @@
 }
 #endif
 
-ThreadWrapper* ThreadLinux::Create(ThreadRunFunction func, ThreadObj obj,
+ThreadWrapper* ThreadPosix::Create(ThreadRunFunction func, ThreadObj obj,
                                    ThreadPriority prio, const char* threadName)
 {
-    ThreadLinux* ptr = new ThreadLinux(func, obj, prio, threadName);
+    ThreadPosix* ptr = new ThreadPosix(func, obj, prio, threadName);
     if (!ptr)
     {
         return NULL;
@@ -63,7 +63,7 @@
     return ptr;
 }
 
-ThreadLinux::ThreadLinux(ThreadRunFunction func, ThreadObj obj,
+ThreadPosix::ThreadPosix(ThreadRunFunction func, ThreadObj obj,
                          ThreadPriority prio, const char* threadName)
     : _runFunction(func),
       _obj(obj),
@@ -83,10 +83,10 @@
     }
 }
 
-int ThreadLinux::Construct()
+int ThreadPosix::Construct()
 {
     int result = 0;
-#if !defined(ANDROID)
+#if !defined(WEBRTC_ANDROID)
     // Enable immediate cancellation if requested, see Shutdown()
     result = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
     if (result != 0)
@@ -108,7 +108,7 @@
     return 0;
 }
 
-ThreadLinux::~ThreadLinux()
+ThreadPosix::~ThreadPosix()
 {
     pthread_attr_destroy(&_attr);
     delete _event;
@@ -118,9 +118,9 @@
                       !defined(WEBRTC_MAC) && !defined(WEBRTC_MAC_INTEL) && \
                       !defined(MAC_DYLIB)  && !defined(MAC_INTEL_DYLIB)
 #if HAS_THREAD_ID
-bool ThreadLinux::Start(unsigned int& threadID)
+bool ThreadPosix::Start(unsigned int& threadID)
 #else
-bool ThreadLinux::Start(unsigned int& /*threadID*/)
+bool ThreadPosix::Start(unsigned int& /*threadID*/)
 #endif
 {
     if (!_runFunction)
@@ -191,8 +191,8 @@
     return true;
 }
 
-#if (defined(WEBRTC_LINUX) && !defined(ANDROID))
-bool ThreadLinux::SetAffinity(const int* processorNumbers,
+#if (defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID))
+bool ThreadPosix::SetAffinity(const int* processorNumbers,
                               const unsigned int amountOfProcessors)
 {
     if (!processorNumbers || (amountOfProcessors == 0))
@@ -222,20 +222,20 @@
 // NOTE: On Mac OS X, use the Thread affinity API in
 // /usr/include/mach/thread_policy.h: thread_policy_set and mach_thread_self()
 // instead of Linux gettid() syscall.
-bool ThreadLinux::SetAffinity(const int* , const unsigned int)
+bool ThreadPosix::SetAffinity(const int* , const unsigned int)
 {
     return false;
 }
 #endif
 
-void ThreadLinux::SetNotAlive()
+void ThreadPosix::SetNotAlive()
 {
     _alive = false;
 }
 
-bool ThreadLinux::Shutdown()
+bool ThreadPosix::Shutdown()
 {
-#if !defined(ANDROID)
+#if !defined(WEBRTC_ANDROID)
     if (_thread && (0 != pthread_cancel(_thread)))
     {
         return false;
@@ -247,7 +247,7 @@
 #endif
 }
 
-bool ThreadLinux::Stop()
+bool ThreadPosix::Stop()
 {
     _alive = false;
 
@@ -270,7 +270,7 @@
     }
 }
 
-void ThreadLinux::Run()
+void ThreadPosix::Run()
 {
     _alive = true;
     _dead  = false;
diff --git a/src/system_wrappers/source/thread_linux.h b/src/system_wrappers/source/thread_posix.h
similarity index 85%
rename from src/system_wrappers/source/thread_linux.h
rename to src/system_wrappers/source/thread_posix.h
index 3e2b908..f664a52 100644
--- a/src/system_wrappers/source/thread_linux.h
+++ b/src/system_wrappers/source/thread_posix.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_POSIX_H_
 
 #include "thread_wrapper.h"
 #include <pthread.h>
@@ -17,15 +17,15 @@
 namespace webrtc {
 class EventWrapper;
 
-class ThreadLinux : public ThreadWrapper
+class ThreadPosix : public ThreadWrapper
 {
 public:
     static ThreadWrapper* Create(ThreadRunFunction func, ThreadObj obj,
                                  ThreadPriority prio, const char* threadName);
 
-    ThreadLinux(ThreadRunFunction func, ThreadObj obj, ThreadPriority prio,
+    ThreadPosix(ThreadRunFunction func, ThreadObj obj, ThreadPriority prio,
                 const char* threadName);
-    ~ThreadLinux();
+    ~ThreadPosix();
 
     // From ThreadWrapper
     virtual void SetNotAlive();
@@ -66,4 +66,4 @@
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_THREAD_POSIX_H_
diff --git a/src/system_wrappers/source/trace_impl.cc b/src/system_wrappers/source/trace_impl.cc
index 0a5f9db..1156519 100644
--- a/src/system_wrappers/source/trace_impl.cc
+++ b/src/system_wrappers/source/trace_impl.cc
@@ -14,13 +14,12 @@
 #include <string.h> // memset
 
 #ifdef _WIN32
-#include "trace_windows.h"
-#include "fix_interlocked_exchange_pointer_windows.h"
+#include "trace_win.h"
 #else
 #include <stdio.h>
 #include <time.h>
 #include <stdarg.h>
-#include "trace_linux.h"
+#include "trace_posix.h"
 #endif // _WIN32
 
 #define KEY_LEN_CHARS 31
@@ -35,176 +34,41 @@
 static WebRtc_UWord32 levelFilter = kTraceDefault;
 
 // Construct On First Use idiom. Avoids "static initialization order fiasco".
-Trace* TraceImpl::StaticInstance(TraceCount inc, const TraceLevel level)
+TraceImpl* TraceImpl::StaticInstance(CountOperation count_operation,
+                                     const TraceLevel level)
 {
-    // TODO (hellner): use atomic wrapper instead.
-    static volatile long theTraceCount = 0;
-    static Trace* volatile theTrace = NULL;
-
-    TraceCreate state = WEBRTC_TRACE_EXIST;
-
-    // Sanitys to avoid taking lock unless absolutely necessary (for
-    // performance reasons). inc == WEBRTC_TRACE_INC_NO_CREATE) implies that
-    // a message will be written to file.
-    if(level != kTraceAll && inc == WEBRTC_TRACE_INC_NO_CREATE)
+    // Sanities to avoid taking lock unless absolutely necessary (for
+    // performance reasons).
+    // count_operation == kAddRefNoCreate implies that a message will be
+    // written to file.
+    if((level != kTraceAll) && (count_operation == kAddRefNoCreate))
     {
         if(!(level & levelFilter))
         {
             return NULL;
         }
     }
-
-#ifndef _WIN32
-    // TODO (pwestin): crtiSect is never reclaimed. Fix memory leak.
-    static CriticalSectionWrapper* crtiSect(
-        CriticalSectionWrapper::CreateCriticalSection());
-    CriticalSectionScoped lock(*crtiSect);
-
-    if(inc == WEBRTC_TRACE_INC_NO_CREATE && theTraceCount == 0)
-    {
-        return NULL;
-    }
-
-    if(inc == WEBRTC_TRACE_INC || inc == WEBRTC_TRACE_INC_NO_CREATE)
-    {
-        theTraceCount++;
-        if(theTraceCount == 1)
-        {
-            state = WEBRTC_TRACE_CREATE;
-        }
-    } else {
-        theTraceCount--;
-        if(theTraceCount == 0)
-        {
-            state = WEBRTC_TRACE_DESTROY;
-        }
-    }
-    if(state == WEBRTC_TRACE_CREATE)
-    {
-        theTrace = TraceImpl::CreateTrace();
-
-    } else if(state == WEBRTC_TRACE_DESTROY) {
-        Trace* oldValue = theTrace;
-        theTrace = NULL;
-        // The lock is held by the scoped critical section. Release the lock
-        // temporarily so that the trace can be safely deleted. If the lock
-        // was kept during the delete, e.g. creating and destroying the trace
-        // too quickly may lead to a deadlock.
-        // This is due to the fact that starting and stopping a ThreadWrapper
-        // thread will trigger writing of trace messages.
-        // TODO (hellner): remove the tight coupling with the thread
-        //                 implementation.
-        crtiSect->Leave();
-        if(oldValue)
-        {
-            delete static_cast<TraceImpl*>(oldValue);
-        }
-        // Re-aqcuire the lock.
-        crtiSect->Enter();
-        return NULL;
-    }
-#else  // _WIN32
-    if(inc == WEBRTC_TRACE_INC_NO_CREATE && theTraceCount == 0)
-    {
-        return NULL;
-    }
-    if(inc == WEBRTC_TRACE_INC_NO_CREATE)
-    {
-        if(1 == InterlockedIncrement(&theTraceCount))
-        {
-            // The trace has been destroyed by some other thread. Rollback.
-            InterlockedDecrement(&theTraceCount);
-            assert(false);
-            return NULL;
-        }
-        // Sanity to catch corrupt state.
-        if(theTrace == NULL)
-        {
-            assert(false);
-            InterlockedDecrement(&theTraceCount);
-            return NULL;
-        }
-    } else if(inc == WEBRTC_TRACE_INC) {
-        if(theTraceCount == 0)
-        {
-            state = WEBRTC_TRACE_CREATE;
-        } else {
-            if(1 == InterlockedIncrement(&theTraceCount))
-            {
-                // InterlockedDecrement because reference count should not be
-                // updated just yet (that's done when the trace is created).
-                InterlockedDecrement(&theTraceCount);
-                state = WEBRTC_TRACE_CREATE;
-            }
-        }
-    } else {
-        int newValue = InterlockedDecrement(&theTraceCount);
-        if(newValue == 0)
-        {
-            state = WEBRTC_TRACE_DESTROY;
-        }
-    }
-
-    if(state == WEBRTC_TRACE_CREATE)
-    {
-        // Create trace and let whichever thread finishes first assign its local
-        // copy to the global instance. All other threads reclaim their local
-        // copy.
-        Trace* newTrace = TraceImpl::CreateTrace();
-        if(1 == InterlockedIncrement(&theTraceCount))
-        {
-            Trace* oldValue = (Trace*)InterlockedExchangePointer(
-                reinterpret_cast<void* volatile*>(&theTrace), newTrace);
-            assert(oldValue == NULL);
-            assert(theTrace);
-        } else {
-            InterlockedDecrement(&theTraceCount);
-            if(newTrace)
-            {
-                delete static_cast<TraceImpl*>(newTrace);
-            }
-        }
-        return NULL;
-    } else if(state == WEBRTC_TRACE_DESTROY)
-    {
-        Trace* oldValue = (Trace*)InterlockedExchangePointer(
-            reinterpret_cast<void* volatile*>(&theTrace), NULL);
-        if(oldValue)
-        {
-            delete static_cast<TraceImpl*>(oldValue);
-        }
-        return NULL;
-    }
-#endif // #ifndef _WIN32
-    return theTrace;
-}
-
-void Trace::CreateTrace()
-{
-    TraceImpl::StaticInstance(WEBRTC_TRACE_INC);
-}
-
-void Trace::ReturnTrace()
-{
-    TraceImpl::StaticInstance(WEBRTC_TRACE_DEC);
+    TraceImpl* impl =
+        GetStaticInstance<TraceImpl>(count_operation);
+    return impl;
 }
 
 TraceImpl* TraceImpl::GetTrace(const TraceLevel level)
 {
-    return (TraceImpl*)StaticInstance(WEBRTC_TRACE_INC_NO_CREATE, level);
+    return StaticInstance(kAddRefNoCreate, level);
 }
 
-Trace* TraceImpl::CreateTrace()
+TraceImpl* TraceImpl::CreateInstance()
 {
 #if defined(_WIN32)
     return new TraceWindows();
 #else
-    return new TraceLinux();
+    return new TracePosix();
 #endif
 }
 
 TraceImpl::TraceImpl()
-    : _critsectInterface(*CriticalSectionWrapper::CreateCriticalSection()),
+    : _critsectInterface(CriticalSectionWrapper::CreateCriticalSection()),
       _callback(NULL),
       _rowCountText(0),
       _fileCountText(0),
@@ -212,7 +76,7 @@
       _thread(*ThreadWrapper::CreateThread(TraceImpl::Run, this,
                                            kHighestPriority, "Trace")),
       _event(*EventWrapper::Create()),
-      _critsectArray(*CriticalSectionWrapper::CreateCriticalSection()),
+      _critsectArray(CriticalSectionWrapper::CreateCriticalSection()),
       _nextFreeIdx(),
       _level(),
       _length(),
@@ -271,8 +135,8 @@
     delete &_event;
     delete &_traceFile;
     delete &_thread;
-    delete &_critsectInterface;
-    delete &_critsectArray;
+    delete _critsectInterface;
+    delete _critsectArray;
 
     for(int m = 0; m < WEBRTC_TRACE_NUM_ARRAY; m++)
     {
@@ -607,9 +471,12 @@
 
     if(_nextFreeIdx[_activeQueue] == WEBRTC_TRACE_MAX_QUEUE-1)
     {
-        // Loggin more messages than can be worked off. Log a warning.
+        // Logging more messages than can be worked off. Log a warning.
+        const char warning_msg[] = "WARNING MISSING TRACE MESSAGES\n";
+        _level[_activeQueue][_nextFreeIdx[_activeQueue]] = kTraceWarning;
+        _length[_activeQueue][_nextFreeIdx[_activeQueue]] = strlen(warning_msg);
         memcpy(_messageQueue[_activeQueue][_nextFreeIdx[_activeQueue]],
-               "WARNING MISSING TRACE MESSAGES\n", 32);
+               warning_msg, _length[_activeQueue][idx]);
         _nextFreeIdx[_activeQueue]++;
     }
 }
@@ -832,7 +699,8 @@
     }
 
     memcpy(fileNameWithCounterUTF8, fileNameUTF8, lengthTo_);
-    sprintf(fileNameWithCounterUTF8+lengthTo_, "_%lu%s", newCount,
+    sprintf(fileNameWithCounterUTF8+lengthTo_, "_%lu%s",
+            static_cast<long unsigned int> (newCount),
             fileNameUTF8+lengthWithoutFileEnding);
     return true;
 }
@@ -865,21 +733,32 @@
     }
     memcpy(fileNameWithCounterUTF8, fileNameUTF8, lengthWithoutFileEnding);
     sprintf(fileNameWithCounterUTF8+lengthWithoutFileEnding, "_%lu%s",
-            newCount, fileNameUTF8+lengthWithoutFileEnding);
+            static_cast<long unsigned int> (newCount),
+            fileNameUTF8+lengthWithoutFileEnding);
     return true;
 }
 
+void Trace::CreateTrace()
+{
+    TraceImpl::StaticInstance(kAddRef);
+}
+
+void Trace::ReturnTrace()
+{
+    TraceImpl::StaticInstance(kRelease);
+}
+
 WebRtc_Word32 Trace::SetLevelFilter(WebRtc_UWord32 filter)
 {
     levelFilter = filter;
     return 0;
-};
+}
 
 WebRtc_Word32 Trace::LevelFilter(WebRtc_UWord32& filter)
 {
     filter = levelFilter;
     return 0;
-};
+}
 
 WebRtc_Word32 Trace::TraceFile(WebRtc_Word8 fileName[FileWrapper::kMaxFileNameSize])
 {
@@ -946,4 +825,5 @@
         ReturnTrace();
     }
 }
+
 } // namespace webrtc
diff --git a/src/system_wrappers/source/trace_impl.h b/src/system_wrappers/source/trace_impl.h
index 42e82fe..455a3d5 100644
--- a/src/system_wrappers/source/trace_impl.h
+++ b/src/system_wrappers/source/trace_impl.h
@@ -14,23 +14,11 @@
 #include "system_wrappers/interface/critical_section_wrapper.h"
 #include "system_wrappers/interface/event_wrapper.h"
 #include "system_wrappers/interface/file_wrapper.h"
+#include "system_wrappers/interface/static_instance.h"
 #include "system_wrappers/interface/trace.h"
 #include "system_wrappers/interface/thread_wrapper.h"
 
 namespace webrtc {
-enum TraceCount
-{
-    WEBRTC_TRACE_DEC    = 0,
-    WEBRTC_TRACE_INC    = 1,
-    WEBRTC_TRACE_INC_NO_CREATE = 2
-};
-
-enum TraceCreate
-{
-    WEBRTC_TRACE_EXIST    = 0,
-    WEBRTC_TRACE_CREATE   = 1,
-    WEBRTC_TRACE_DESTROY  = 2
-};
 
 // TODO (pwestin) WEBRTC_TRACE_MAX_QUEUE needs to be tweaked
 // TODO (hellner) the buffer should be close to how much the system can write to
@@ -58,12 +46,9 @@
 public:
     virtual ~TraceImpl();
 
-    static Trace* CreateTrace();
+    static TraceImpl* CreateInstance();
     static TraceImpl* GetTrace(const TraceLevel level = kTraceAll);
 
-    static Trace* StaticInstance(TraceCount inc,
-                                 const TraceLevel level = kTraceAll);
-
     WebRtc_Word32 SetTraceFileImpl(const WebRtc_Word8* fileName,
                                    const bool addFileCounter);
     WebRtc_Word32 TraceFileImpl(
@@ -81,6 +66,9 @@
 protected:
     TraceImpl();
 
+    static TraceImpl* StaticInstance(CountOperation count_operation,
+        const TraceLevel level = kTraceAll);
+
     // OS specific implementations
     virtual WebRtc_Word32 AddThreadId(char* traceMessage) const = 0;
     virtual WebRtc_Word32 AddTime(char* traceMessage,
@@ -93,6 +81,8 @@
     bool Process();
 
 private:
+    friend class Trace;
+
     WebRtc_Word32 AddLevel(char* szMessage, const TraceLevel level) const;
 
     WebRtc_Word32 AddModuleAndId(char* traceMessage, const TraceModule module,
@@ -119,7 +109,7 @@
 
     void WriteToFile();
 
-    CriticalSectionWrapper& _critsectInterface;
+    CriticalSectionWrapper* _critsectInterface;
     TraceCallback* _callback;
     WebRtc_UWord32 _rowCountText;
     WebRtc_UWord32 _fileCountText;
@@ -129,7 +119,7 @@
     EventWrapper& _event;
 
     // _critsectArray protects _activeQueue
-    CriticalSectionWrapper& _critsectArray;
+    CriticalSectionWrapper* _critsectArray;
     WebRtc_UWord16 _nextFreeIdx[WEBRTC_TRACE_NUM_ARRAY];
     TraceLevel _level[WEBRTC_TRACE_NUM_ARRAY][WEBRTC_TRACE_MAX_QUEUE];
     WebRtc_UWord16 _length[WEBRTC_TRACE_NUM_ARRAY][WEBRTC_TRACE_MAX_QUEUE];
diff --git a/src/system_wrappers/source/trace_impl_no_op.cc b/src/system_wrappers/source/trace_impl_no_op.cc
new file mode 100644
index 0000000..1752871
--- /dev/null
+++ b/src/system_wrappers/source/trace_impl_no_op.cc
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "trace.h"
+
+namespace webrtc {
+
+void Trace::CreateTrace()
+{
+}
+
+void Trace::ReturnTrace()
+{
+}
+
+WebRtc_Word32 Trace::SetLevelFilter(WebRtc_UWord32 /*filter*/)
+{
+    return 0;
+}
+
+WebRtc_Word32 Trace::LevelFilter(WebRtc_UWord32& /*filter*/)
+{
+    return 0;
+}
+
+WebRtc_Word32 Trace::TraceFile(
+    WebRtc_Word8 /*fileName*/[1024])
+{
+    return -1;
+}
+
+WebRtc_Word32 Trace::SetTraceFile(const WebRtc_Word8* /*fileName*/,
+                                  const bool /*addFileCounter*/)
+{
+    return -1;
+}
+
+WebRtc_Word32 Trace::SetTraceCallback(TraceCallback* /*callback*/)
+{
+    return -1;
+}
+
+void Trace::Add(const TraceLevel /*level*/, const TraceModule /*module*/,
+                const WebRtc_Word32 /*id*/, const char* /*msg*/, ...)
+
+{
+}
+
+} // namespace webrtc
diff --git a/src/system_wrappers/source/trace_linux.cc b/src/system_wrappers/source/trace_posix.cc
similarity index 80%
rename from src/system_wrappers/source/trace_linux.cc
rename to src/system_wrappers/source/trace_posix.cc
index 8dba3be..198c434 100644
--- a/src/system_wrappers/source/trace_linux.cc
+++ b/src/system_wrappers/source/trace_posix.cc
@@ -8,15 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "trace_linux.h"
+#include "trace_posix.h"
 
 #include <cassert>
 #include <stdarg.h>
 #include <stdio.h>
 #include <string.h>
 #include <time.h>
-
-#ifdef ANDROID
+#ifdef __linux__
+    #include <sys/syscall.h>
+#endif
+#ifdef WEBRTC_ANDROID
     #include <pthread.h>
 #else
     #include <iostream>
@@ -37,26 +39,31 @@
 #define BUILDINFO BUILDDATE " " BUILDTIME " " BUILDMODE
 
 namespace webrtc {
-TraceLinux::TraceLinux()
+TracePosix::TracePosix()
 {
     _prevAPITickCount = time(NULL);
     _prevTickCount = _prevAPITickCount;
 }
 
-TraceLinux::~TraceLinux()
+TracePosix::~TracePosix()
 {
     StopThread();
 }
 
-WebRtc_Word32 TraceLinux::AddThreadId(char* traceMessage) const
-{
-    WebRtc_UWord64 threadId = (WebRtc_UWord64)pthread_self();
-    sprintf(traceMessage, "%10llu; ", threadId);
-    // 12 bytes are written.
-    return 12;
+WebRtc_Word32 TracePosix::AddThreadId(char* traceMessage) const {
+#ifdef __linux__
+  pid_t threadId = (pid_t) syscall(__NR_gettid);
+  sprintf(traceMessage, "%10d; ", threadId);
+#else
+  WebRtc_UWord64 threadId = (WebRtc_UWord64)pthread_self();
+  sprintf(traceMessage, "%10llu; ",
+          static_cast<long long unsigned int>(threadId));
+#endif
+  // 12 bytes are written.
+  return 12;
 }
 
-WebRtc_Word32 TraceLinux::AddTime(char* traceMessage,
+WebRtc_Word32 TracePosix::AddTime(char* traceMessage,
                                   const TraceLevel level) const
 {
     time_t dwCurrentTimeInSeconds = time(NULL);
@@ -109,14 +116,14 @@
     return 22;
 }
 
-WebRtc_Word32 TraceLinux::AddBuildInfo(char* traceMessage) const
+WebRtc_Word32 TracePosix::AddBuildInfo(char* traceMessage) const
 {
     sprintf(traceMessage, "Build info: %s", BUILDINFO);
     // Include NULL termination (hence + 1).
     return strlen(traceMessage) + 1;
 }
 
-WebRtc_Word32 TraceLinux::AddDateTimeInfo(char* traceMessage) const
+WebRtc_Word32 TracePosix::AddDateTimeInfo(char* traceMessage) const
 {
     time_t t;
     time(&t);
diff --git a/src/system_wrappers/source/trace_linux.h b/src/system_wrappers/source/trace_posix.h
similarity index 79%
rename from src/system_wrappers/source/trace_linux.h
rename to src/system_wrappers/source/trace_posix.h
index 6e327a0..099bcc8 100644
--- a/src/system_wrappers/source/trace_linux.h
+++ b/src/system_wrappers/source/trace_posix.h
@@ -8,18 +8,18 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_LINUX_H_
-#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_LINUX_H_
+#ifndef WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_POSIX_H_
+#define WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_POSIX_H_
 
 #include "critical_section_wrapper.h"
 #include "trace_impl.h"
 
 namespace webrtc {
-class TraceLinux : public TraceImpl
+class TracePosix : public TraceImpl
 {
 public:
-    TraceLinux();
-    virtual ~TraceLinux();
+    TracePosix();
+    virtual ~TracePosix();
 
     virtual WebRtc_Word32 AddThreadId(char *traceMessage) const;
     virtual WebRtc_Word32 AddTime(char* traceMessage,
@@ -34,4 +34,4 @@
 };
 } // namespace webrtc
 
-#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_LINUX_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_SOURCE_TRACE_POSIX_H_
diff --git a/src/system_wrappers/test/Test.cpp b/src/system_wrappers/test/Test.cpp
deleted file mode 100644
index 7a34166..0000000
--- a/src/system_wrappers/test/Test.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <cassert>
-#include <iostream>
-
-#ifdef _WIN32
-    #include <windows.h>
-    #include <tchar.h>
-#else
-    #include <stdio.h>
-    #define Sleep(x) usleep(x*1000)
-#endif
-
-#include "common_types.h"
-#include "trace.h"
-#include "cpu_wrapper.h"
-
-
-#ifdef _WIN32
-int _tmain(int argc, _TCHAR* argv[])
-#else
-int main(int argc, char* argv[])
-#endif
-{
-    Trace::CreateTrace();
-    Trace::SetTraceFile("testTrace.txt");
-    Trace::SetLevelFilter(webrtc::kTraceAll);
-
-    printf("Start system wrapper test\n");
-
-    printf("Number of cores detected:%u\n", (unsigned int)CpuWrapper::DetectNumberOfCores());
-
-    CpuWrapper* cpu = CpuWrapper::CreateCpu();
-
-    WebRtc_UWord32 numCores;
-    WebRtc_UWord32* cores;
-
-    for(int i = 0; i< 10;i++)
-    {
-        WebRtc_Word32 total = cpu->CpuUsageMultiCore(numCores, cores);
-
-        printf("\nNumCores:%d\n", (int)numCores);
-        printf("Total cpu:%d\n", (int)total);
-
-        for (WebRtc_UWord32 i = 0; i< numCores;i++)
-        {
-            printf("Core:%lu CPU:%lu \n", i, cores[i]);
-        }
-        Sleep(1000);
-    }
-
-    printf("Done system wrapper test\n");
-
-    delete cpu;
-
-    Trace::ReturnTrace();
-};
diff --git a/src/typedefs.h b/src/typedefs.h
index ae71690..ba87309 100644
--- a/src/typedefs.h
+++ b/src/typedefs.h
@@ -8,36 +8,40 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- *
- * This file contains type definitions used in all WebRtc APIs.
- * 
- */
+// This file contains platform-specific typedefs and defines.
 
-/* Reserved words definitions */
+#ifndef WEBRTC_TYPEDEFS_H_
+#define WEBRTC_TYPEDEFS_H_
+
+// Reserved words definitions
+// TODO(andrew): Look at removing these.
 #define WEBRTC_EXTERN extern
 #define G_CONST const
 #define WEBRTC_INLINE extern __inline
 
-#ifndef WEBRTC_TYPEDEFS_H
-#define WEBRTC_TYPEDEFS_H
-
-/* Define WebRtc preprocessor identifiers based on the current build platform */
+// Define WebRTC preprocessor identifiers based on the current build platform.
+// TODO(andrew): Clean these up. We can probably remove everything in this
+// block.
+//   - TARGET_MAC_INTEL and TARGET_MAC aren't used anywhere.
+//   - In the few places where TARGET_PC is used, it should be replaced by
+//     something more specific.
+//   - Do we really support PowerPC? Probably not. Remove WEBRTC_MAC_INTEL
+//     from build/common.gypi as well.
 #if defined(WIN32)
-    // Windows & Windows Mobile
+    // Windows & Windows Mobile.
     #if !defined(WEBRTC_TARGET_PC)
         #define WEBRTC_TARGET_PC
     #endif
 #elif defined(__APPLE__)
-    // Mac OS X
-    #if defined(__LITTLE_ENDIAN__ ) //TODO: is this used?
+    // Mac OS X.
+    #if defined(__LITTLE_ENDIAN__ )
         #if !defined(WEBRTC_TARGET_MAC_INTEL)
             #define WEBRTC_TARGET_MAC_INTEL
-        #endif  
+        #endif
     #else
         #if !defined(WEBRTC_TARGET_MAC)
             #define WEBRTC_TARGET_MAC
-        #endif  
+        #endif
     #endif
 #else
     // Linux etc.
@@ -46,6 +50,40 @@
     #endif
 #endif
 
+// Derived from Chromium's build/build_config.h
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+// TODO(andrew): replace WEBRTC_LITTLE_ENDIAN with WEBRTC_ARCH_LITTLE_ENDIAN?
+#if defined(_M_X64) || defined(__x86_64__)
+#define WEBRTC_ARCH_X86_FAMILY
+#define WEBRTC_ARCH_X86_64
+#define WEBRTC_ARCH_64_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(_M_IX86) || defined(__i386__)
+#define WEBRTC_ARCH_X86_FAMILY
+#define WEBRTC_ARCH_X86
+#define WEBRTC_ARCH_32_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(__ARMEL__)
+// TODO(andrew): We'd prefer to control platform defines here, but this is
+// currently provided by the Android makefiles. Commented to avoid duplicate
+// definition warnings.
+//#define WEBRTC_ARCH_ARM
+// TODO(andrew): Chromium uses the following two defines. Should we switch?
+//#define WEBRTC_ARCH_ARM_FAMILY
+//#define WEBRTC_ARCH_ARMEL
+#define WEBRTC_ARCH_32_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#else
+#error Please add support for your architecture in typedefs.h
+#endif
+
+#if defined(__SSE2__) || defined(_MSC_VER)
+#define WEBRTC_USE_SSE2
+#endif
+
 #if defined(WEBRTC_TARGET_PC)
 
 #if !defined(_MSC_VER)
@@ -79,7 +117,7 @@
     typedef char                WebRtc_Word8;
     typedef uint8_t             WebRtc_UWord8;
 
-    /* Define endian for the platform */
+    // Define endian for the platform
     #define WEBRTC_LITTLE_ENDIAN
 
 #elif defined(WEBRTC_TARGET_MAC_INTEL)
@@ -94,14 +132,11 @@
     typedef uint16_t            WebRtc_UWord16;
     typedef uint8_t             WebRtc_UWord8;
 
-    /* Define endian for the platform */
+    // Define endian for the platform
     #define WEBRTC_LITTLE_ENDIAN
 
 #else
-
-    #error "No platform defined for WebRtc type definitions (webrtc_typedefs.h)"
-
+    #error "No platform defined for WebRTC type definitions (typedefs.h)"
 #endif
 
-
-#endif // WEBRTC_TYPEDEFS_H
+#endif  // WEBRTC_TYPEDEFS_H_
diff --git a/test/OWNERS b/test/OWNERS
new file mode 100644
index 0000000..777963e
--- /dev/null
+++ b/test/OWNERS
@@ -0,0 +1,4 @@
+phoglund@webrtc.org

+kjellander@webrtc.org

+ivinnichenko@webrtc.org

+amyfong@webrtc.org

diff --git a/test/data/audio_processing/aec_far.pcm b/test/data/audio_processing/aec_far.pcm
new file mode 100644
index 0000000..fd6afc0
--- /dev/null
+++ b/test/data/audio_processing/aec_far.pcm
Binary files differ
diff --git a/test/data/audio_processing/aec_near.pcm b/test/data/audio_processing/aec_near.pcm
new file mode 100644
index 0000000..02c016c
--- /dev/null
+++ b/test/data/audio_processing/aec_near.pcm
Binary files differ
diff --git a/test/data/audio_processing/android/output_data_fixed.pb b/test/data/audio_processing/android/output_data_fixed.pb
new file mode 100644
index 0000000..2f45fd3
--- /dev/null
+++ b/test/data/audio_processing/android/output_data_fixed.pb
Binary files differ
diff --git a/test/data/audio_processing/android/output_data_float.pb b/test/data/audio_processing/android/output_data_float.pb
new file mode 100644
index 0000000..1bf18c2
--- /dev/null
+++ b/test/data/audio_processing/android/output_data_float.pb
Binary files differ
diff --git a/test/data/audio_processing/output_data_fixed.pb b/test/data/audio_processing/output_data_fixed.pb
new file mode 100644
index 0000000..81bc5af
--- /dev/null
+++ b/test/data/audio_processing/output_data_fixed.pb
Binary files differ
diff --git a/test/data/audio_processing/output_data_float.pb b/test/data/audio_processing/output_data_float.pb
new file mode 100644
index 0000000..ccd7509
--- /dev/null
+++ b/test/data/audio_processing/output_data_float.pb
Binary files differ
diff --git a/test/functional_test/README b/test/functional_test/README
new file mode 100644
index 0000000..a855135
--- /dev/null
+++ b/test/functional_test/README
@@ -0,0 +1,41 @@
+This test client is a simple functional test for WebRTC enabled Chrome build.
+
+The following is necessary to run the test:
+- A WebRTC Chrome binary.
+- A peerconnection_server binary (make peerconnection_server).
+
+It can be used in two scenarios:
+1. Single client calling itself with the server test page
+(peerconnection/samples/server/server_test.html) in loopback mode as a fake
+client.
+2. Call between two clients.
+
+To start the test for scenario (1):
+1. Start peerconnection_server.
+2. Start the WebRTC Chrome build: $ <path_to_chome_binary>/chrome
+--enable-media-stream --enable-p2papi --user-data-dir=<path_to_data>
+<path_to_data> is where Chrome looks for all its states, use for example
+"temp/chrome_webrtc_data". If you don't always start the browser from the same
+directory, use an absolute path instead.
+3. Open the server test page, ensure loopback is enabled, choose a name (for
+example "loopback") and connect to the server.
+4. Open the test page, connect to the server, select the loopback peer, click
+call.
+
+To start the test for scenario (2):
+1. Start peerconnection_server.
+2. Start the WebRTC Chrome build, see scenario (1).
+3. Open the test page, connect to the server.
+4. On another machine, start the WebRTC Chrome build.
+5. Open the test page, connect to the server, select the other peer, click call.
+
+Note 1: There is currently a limitation so that the camera device can only be
+accessed once, even if in the same browser instance. Hence the need to use two
+machines for scenario (2).
+
+Note 2: The web page must normally be on a web server to be able to access the
+camera for security reasons.
+See http://blog.chromium.org/2008/12/security-in-depth-local-web-pages.html
+for more details on this topic. This can be overridden with the flag
+--allow-file-access-from-files, in which case running it over the file://
+URI scheme works.
diff --git a/test/functional_test/webrtc_test.html b/test/functional_test/webrtc_test.html
new file mode 100644
index 0000000..e2d8939
--- /dev/null
+++ b/test/functional_test/webrtc_test.html
@@ -0,0 +1,594 @@
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+
+<!--
+Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+
+Use of this source code is governed by a BSD-style license
+that can be found in the LICENSE file in the root of the source
+tree. An additional intellectual property rights grant can be found
+in the file PATENTS. All contributing project authors may
+be found in the AUTHORS file in the root of the source tree.
+-->
+
+<html>
+
+<head>
+<title>WebRTC Test</title>
+
+<style type="text/css">
+body, input, button, select, table {
+  font-family:"Lucida Grande", "Lucida Sans", Verdana, Arial, sans-serif;
+  font-size: 13 px;
+}
+body, input:enable, button:enable, select:enable, table {
+  color: rgb(51, 51, 51);
+}
+h1 {font-size: 40 px;}
+</style>
+
+<script type="text/javascript">
+
+// TODO: Catch more exceptions
+
+var server;
+var myId = -1;
+var myName;
+var remoteId = -1;
+var remoteName;
+var request = null;
+var hangingGet = null;
+var pc = null;
+var localStream = null;
+var disconnecting = false;
+var callState = 0; // 0 - Not started, 1 - Call ongoing
+
+
+// General
+
+function toggleExtraButtons() {
+  document.getElementById("createPcBtn").hidden =
+    !document.getElementById("createPcBtn").hidden;
+  document.getElementById("test1Btn").hidden =
+    !document.getElementById("test1Btn").hidden;
+}
+
+function trace(txt) {
+  var elem = document.getElementById("debug");
+  elem.innerHTML += txt + "<br>";
+}
+
+function trace_warning(txt) {
+  var wtxt = "<b>" + txt + "</b>";
+  trace(wtxt);
+}
+
+function trace_exception(e, txt) {
+  var etxt = "<b>" + txt + "</b> (" + e.name + " / " + e.message + ")";
+  trace(etxt);
+}
+
+function setCallState(state) {
+  trace("Changing call state: " + callState + " -> " + state);
+  callState = state;
+}
+
+function checkPeerConnection() {
+  if (!pc) {
+    trace_warning("No PeerConnection object exists");
+    return 0;
+  }
+  return 1;
+}
+
+
+// Local stream generation
+
+function gotStream(s) {
+  var url = webkitURL.createObjectURL(s);
+  document.getElementById("localView").src = url;
+  trace("User has granted access to local media. url = " + url);
+  localStream = s;
+}
+
+function gotStreamFailed(error) {
+  alert("Failed to get access to local media. Error code was " + error.code +
+    ".");
+  trace_warning("Failed to get access to local media. Error code was " +
+    error.code);
+}
+
+function getUserMedia() {
+  try {
+    navigator.webkitGetUserMedia("video,audio", gotStream, gotStreamFailed);
+    trace("Requested access to local media");
+  } catch (e) {
+    trace_exception(e, "getUserMedia error");
+  }
+}
+
+
+// Peer list and remote peer handling
+
+function peerExists(id) {
+  try {
+    var peerList = document.getElementById("peers");
+    for (var i = 0; i < peerList.length; i++) {
+      if (parseInt(peerList.options[i].value) == id)
+        return true;
+    }
+  } catch (e) {
+    trace_exception(e, "Error searching for peer");
+  }
+  return false;
+}
+
+function addPeer(id, pname) {
+  var peerList = document.getElementById("peers");
+  var option = document.createElement("option");
+  option.text = pname;
+  option.value = id;
+  try {
+    // For IE earlier than version 8
+    peerList.add(option, x.options[null]);
+  } catch (e) {
+    peerList.add(option, null);
+  }
+}
+
+function removePeer(id) {
+  try {
+    var peerList = document.getElementById("peers");
+    for (var i = 0; i < peerList.length; i++) {
+      if (parseInt(peerList.options[i].value) == id) {
+        peerList.remove(i);
+        break;
+      }
+    }
+  } catch (e) {
+    trace_exception(e, "Error removing peer");
+  }
+}
+
+function clearPeerList() {
+  var peerList = document.getElementById("peers");
+  while (peerList.length > 0)
+    peerList.remove(0);
+}
+
+function setSelectedPeer(id) {
+  try {
+    var peerList = document.getElementById("peers");
+    for (var i = 0; i < peerList.length; i++) {
+      if (parseInt(peerList.options[i].value) == id) {
+        peerList.options[i].selected = true;
+        return true;
+      }
+    }
+  } catch (e) {
+    trace_exception(e, "Error setting selected peer");
+  }
+  return false;
+}
+
+function getPeerName(id) {
+  try {
+    var peerList = document.getElementById("peers");
+    for (var i = 0; i < peerList.length; i++) {
+      if (parseInt(peerList.options[i].value) == id) {
+        return peerList.options[i].text;
+      }
+    }
+  } catch (e) {
+    trace_exception(e, "Error finding peer name");
+    return;
+  }
+  return;
+}
+
+function storeRemoteInfo() {
+  try {
+    var peerList = document.getElementById("peers");
+    if (peerList.selectedIndex < 0) {
+      alert("Please select a peer.");
+      return false;
+    } else
+      remoteId = parseInt(peerList.options[peerList.selectedIndex].value);
+      remoteName = peerList.options[peerList.selectedIndex].text;
+  } catch (e) {
+    trace_exception(e, "Error storing remote peer info");
+    return false;
+  }
+  return true;
+}
+
+
+// Call control
+
+function createPeerConnection() {
+  if (pc) {
+    trace_warning("PeerConnection object already exists");
+  }
+  trace("Creating PeerConnection object");
+  try {
+    pc = new webkitPeerConnection("STUN stun.l.google.com:19302",
+      onSignalingMessage);
+  pc.onaddstream = onAddStream;
+  pc.onremovestream = onRemoveStream;
+  } catch (e) {
+    trace_exception(e, "Create PeerConnection error");
+  }
+}
+
+function doCall() {
+  if (!storeRemoteInfo())
+    return;
+  document.getElementById("call").disabled = true;
+  document.getElementById("peers").disabled = true;
+  createPeerConnection();
+  trace("Adding stream");
+  pc.addStream(localStream);
+  document.getElementById("hangup").disabled = false;
+  setCallState(1);
+}
+
+function hangUp() {
+  document.getElementById("hangup").disabled = true;
+  trace("Sending BYE to " + remoteName + " (ID " + remoteId + ")");
+  sendToPeer(remoteId, "BYE");
+  closeCall();
+}
+
+function closeCall() {
+  trace("Stopping showing remote stream");
+  document.getElementById("remoteView").src = "dummy";
+  if (pc) {
+    trace("Stopping call [pc.close()]");
+    pc.close();
+    pc = null;
+  } else
+    trace("No pc object to close");
+  remoteId = -1;
+  document.getElementById("call").disabled = false;
+  document.getElementById("peers").disabled = false;
+  setCallState(0);
+}
+
+
+// PeerConnection callbacks
+
+function onAddStream(e) {
+  var stream = e.stream;
+  var url = webkitURL.createObjectURL(stream);
+  document.getElementById("remoteView").src = url;
+  trace("Started showing remote stream. url = " + url);
+}
+
+function onRemoveStream(e) {
+  // Currently if we get this callback, call has ended.
+  document.getElementById("remoteView").src = "";
+  trace("Stopped showing remote stream");
+}
+
+function onSignalingMessage(msg) {
+  trace("Sending message to " + remoteName + " (ID " + remoteId + "):\n" + msg);
+  sendToPeer(remoteId, msg);
+}
+
+// TODO: Add callbacks onconnecting, onopen and onstatechange.
+
+
+// Server interaction
+
+function handleServerNotification(data) {
+  trace("Server notification: " + data);
+  var parsed = data.split(",");
+  if (parseInt(parsed[2]) == 1) { // New peer
+    var peerId = parseInt(parsed[1]);
+    if (!peerExists(peerId)) {
+      var peerList = document.getElementById("peers");
+      if (peerList.length == 1 && peerList.options[0].value == -1)
+        clearPeerList();
+      addPeer(peerId, parsed[0]);
+      document.getElementById("peers").disabled = false;
+      document.getElementById("call").disabled = false;
+    }
+  } else if (parseInt(parsed[2]) == 0) { // Removed peer
+    removePeer(parseInt(parsed[1]));
+    if (document.getElementById("peers").length == 0) {
+      document.getElementById("peers").disabled = true;
+      addPeer(-1, "No other peer connected");
+    }
+  }
+}
+
+function handlePeerMessage(peer_id, msg) {
+  var peerName = getPeerName(peer_id);
+  if (peerName == undefined) {
+    trace_warning("Received message from unknown peer (ID " + peer_id +
+      "), ignoring message:");
+    trace(msg);
+    return;
+  }
+  trace("Received message from " + peerName + " (ID " + peer_id + "):\n" + msg);
+  // Assuming we receive the message from the peer we want to communicate with.
+  // TODO: Only accept messages from peer we communicate with with if call is
+  // ongoing.
+  if (msg.search("BYE") == 0) {
+    // Other side has hung up.
+    document.getElementById("hangup").disabled = true;
+    closeCall()
+  } else {
+    if (!pc) {
+      // Other side is calling us, startup
+      if (!setSelectedPeer(peer_id)) {
+        trace_warning("Recevied message from unknown peer, ignoring");
+        return;
+      }
+      if (!storeRemoteInfo())
+        return;
+      document.getElementById("call").disabled = true;
+      document.getElementById("peers").disabled = true;
+      createPeerConnection();
+      try {
+        pc.processSignalingMessage(msg);
+      } catch (e) {
+        trace_exception(e, "Process signaling message error");
+      }
+      trace("Adding stream");
+      pc.addStream(localStream);
+      document.getElementById("hangup").disabled = false;
+    } else {
+      try {
+        pc.processSignalingMessage(msg);
+      } catch (e) {
+        trace_exception(e, "Process signaling message error");
+      }
+    }
+  }
+}
+
+function getIntHeader(r, name) {
+  var val = r.getResponseHeader(name);
+  trace("header value: " + val);
+  return val != null && val.length ? parseInt(val) : -1;
+}
+
+function hangingGetCallback() {
+  try {
+    if (hangingGet.readyState != 4 || disconnecting)
+      return;
+    if (hangingGet.status != 200) {
+      trace_warning("server error, status: " + hangingGet.status + ", text: " +
+        hangingGet.statusText);
+      disconnect();
+    } else {
+      var peer_id = getIntHeader(hangingGet, "Pragma");
+      if (peer_id == myId) {
+        handleServerNotification(hangingGet.responseText);
+      } else {
+        handlePeerMessage(peer_id, hangingGet.responseText);
+      }
+    }
+
+    if (hangingGet) {
+      hangingGet.abort();
+      hangingGet = null;
+    }
+
+    if (myId != -1)
+      window.setTimeout(startHangingGet, 0);
+  } catch (e) {
+    trace_exception(e, "Hanging get error");
+  }
+}
+
+function onHangingGetTimeout() {
+  trace("hanging get timeout. issuing again");
+  hangingGet.abort();
+  hangingGet = null;
+  if (myId != -1)
+    window.setTimeout(startHangingGet, 0);
+}
+
+function startHangingGet() {
+  try {
+    hangingGet = new XMLHttpRequest();
+    hangingGet.onreadystatechange = hangingGetCallback;
+    hangingGet.ontimeout = onHangingGetTimeout;
+    hangingGet.open("GET", server + "/wait?peer_id=" + myId, true);
+    hangingGet.send();  
+  } catch (e) {
+    trace_exception(e, "Start hanging get error");
+  }
+}
+
+function sendToPeer(peer_id, data) {
+  if (myId == -1) {
+    alert("Not connected.");
+    return;
+  }
+  if (peer_id == myId) {
+    alert("Can't send a message to oneself.");
+    return;
+  }
+  var r = new XMLHttpRequest();
+  r.open("POST", server + "/message?peer_id=" + myId + "&to=" + peer_id, false);
+  r.setRequestHeader("Content-Type", "text/plain");
+  r.send(data);
+  r = null;
+}
+
+function signInCallback() {
+  try {
+    if (request.readyState == 4) {
+      if (request.status == 200) {
+        var peers = request.responseText.split("\n");
+        myId = parseInt(peers[0].split(",")[1]);
+        trace("My id: " + myId);
+        clearPeerList();
+        var added = 0;
+        for (var i = 1; i < peers.length; ++i) {
+          if (peers[i].length > 0) {
+            trace("Peer " + i + ": " + peers[i]);
+            var parsed = peers[i].split(",");
+            addPeer(parseInt(parsed[1]), parsed[0]);
+            ++added;
+          }
+        }
+        if (added == 0)
+          addPeer(-1, "No other peer connected");
+        else {
+          document.getElementById("peers").disabled = false;
+          document.getElementById("call").disabled = false;
+        }
+        startHangingGet();
+        request = null;
+        document.getElementById("connect").disabled = true;
+        document.getElementById("disconnect").disabled = false;
+      }
+    }
+  } catch (e) {
+    trace_exception(e, "Sign in error");
+    document.getElementById("connect").disabled = false;
+  }
+}
+
+function signIn() {
+  try {
+    request = new XMLHttpRequest();
+    request.onreadystatechange = signInCallback;
+    request.open("GET", server + "/sign_in?" + myName, true);
+    request.send();
+  } catch (e) {
+    trace_exception(e, "Start sign in error");
+    document.getElementById("connect").disabled = false;
+  }
+}
+
+function connect() {
+  myName = document.getElementById("local").value.toLowerCase();
+  server = document.getElementById("server").value.toLowerCase();
+  if (myName.length == 0) {
+    alert("I need a name please.");
+    document.getElementById("local").focus();
+  } else {
+    // TODO: Disable connect button here, but we need a timeout and check if we
+    // have connected, if so enable it again.
+    signIn();
+  }
+}
+
+function disconnect() {
+  if (callState == 1)
+    hangUp();
+
+  disconnecting = true;
+  
+  if (request) {
+    request.abort();
+    request = null;
+  }
+
+  if (hangingGet) {
+    hangingGet.abort();
+    hangingGet = null;
+  }
+
+  if (myId != -1) {
+    request = new XMLHttpRequest();
+    request.open("GET", server + "/sign_out?peer_id=" + myId, false);
+    request.send();
+    request = null;
+    myId = -1;
+  }
+
+  clearPeerList();
+  addPeer(-1, "Not connected");
+  document.getElementById("connect").disabled = false;
+  document.getElementById("disconnect").disabled = true;
+  document.getElementById("peers").disabled = true;
+  document.getElementById("call").disabled = true;
+
+  disconnecting = false;
+}
+
+
+// Window event handling
+
+window.onload = getUserMedia;
+window.onbeforeunload = disconnect;
+
+
+</script>
+</head>
+
+<body>
+<h1>WebRTC</h1>
+You must have a WebRTC capable browser in order to make calls using this test
+page.<br>&nbsp;
+
+<table border="0">
+<tr>
+ <td>Local Preview</td>
+ <td>Remote Video</td>
+</tr>
+<tr>
+ <td>
+  <video width="320" height="240" id="localView" autoplay="autoplay"></video>
+ </td>
+ <td>
+  <video width="640" height="480" id="remoteView" autoplay="autoplay"></video>
+ </td>
+</tr>
+</table>
+
+<table border="0">
+<tr>
+ <td valign="top">
+  <table border="0" cellpaddning="0" cellspacing="0">
+  <tr>
+   <td>Server:</td>
+   <td>
+    <input type="text" id="server" size="30" value="http://localhost:8888"/>
+   </td>
+  </tr>
+  <tr>
+   <td>Name:</td><td><input type="text" id="local" size="30" value="name"/></td>
+  </tr>
+  </table>
+ </td>
+ <td valign="top">
+  <button id="connect" onclick="connect();">Connect</button><br>
+  <button id="disconnect" onclick="disconnect();" disabled="true">Disconnect
+  </button>
+ </td>
+ <td>&nbsp;&nbsp;&nbsp;</td>
+ <td valign="top">
+  Connected peers:<br>
+  <select id="peers" size="5" disabled="true">
+   <option value="-1">Not connected</option>
+  </select>
+  </td>
+ <td valign="top">
+  <!--input type="text" id="peer_id" size="3" value="1"/><br-->
+  <button id="call" onclick="doCall();" disabled="true">Call</button><br>
+  <button id="hangup" onclick="hangUp();" disabled="true">Hang up</button><br>
+ </td>
+ <td>&nbsp;&nbsp;&nbsp;</td>
+ <td valign="top">
+  <button onclick="toggleExtraButtons();">Toggle extra buttons (debug)</button>
+  <br>
+  <button id="createPcBtn" onclick="createPeerConnection();" hidden="true">
+  Create peer connection</button>
+ </td>
+</tr>
+</table>
+
+<button onclick="document.getElementById('debug').innerHTML='';">Clear log
+</button>
+<pre id="debug"></pre>
+
+</body>
+
+</html>
+
diff --git a/test/metrics.gyp b/test/metrics.gyp
new file mode 100644
index 0000000..70483f9
--- /dev/null
+++ b/test/metrics.gyp
@@ -0,0 +1,46 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'includes': [
+    '../src/build/common.gypi',
+  ],
+  'targets': [
+    {
+      # The metrics code must be kept in its own GYP file in order to
+      # avoid a circular dependency error due to the dependency on libyuv.
+      # If the code would be put in test.gyp a circular dependency error during
+      # GYP generation would occur, because the libyuv.gypi unittest target
+      # depends on test_support_main. See issue #160 for more info.
+      'target_name': 'metrics',
+      'type': '<(library)',
+      'dependencies': [
+        '<(webrtc_root)/common_video/common_video.gyp:webrtc_libyuv',
+      ],
+      'include_dirs': [
+        '.',
+      ],
+      'sources': [
+        'testsupport/metrics/video_metrics.h',
+        'testsupport/metrics/video_metrics.cc',
+      ],
+    },
+    {
+      'target_name': 'metrics_unittests',
+      'type': 'executable',
+      'dependencies': [
+        'metrics',
+        '<(webrtc_root)/../test/test.gyp:test_support_main',
+        '<(webrtc_root)/../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'testsupport/metrics/video_metrics_unittest.cc',
+      ],
+    },
+  ],
+}
diff --git a/test/run_all_unittests.cc b/test/run_all_unittests.cc
new file mode 100644
index 0000000..0cdf0cd
--- /dev/null
+++ b/test/run_all_unittests.cc
@@ -0,0 +1,16 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/test_suite.h"
+
+int main(int argc, char** argv) {
+  webrtc::test::TestSuite test_suite(argc, argv);
+  return test_suite.Run();
+}
diff --git a/test/test.gyp b/test/test.gyp
new file mode 100644
index 0000000..86a57ff
--- /dev/null
+++ b/test/test.gyp
@@ -0,0 +1,78 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# TODO(andrew): consider moving test_support to src/base/test.
+{
+  'includes': [
+    '../src/build/common.gypi',
+  ],
+  'targets': [
+    {
+      'target_name': 'test_support',
+      'type': 'static_library',
+      'include_dirs': [
+        '.',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '.', # Some includes are hierarchical
+        ],
+      },
+      'dependencies': [
+        '<(webrtc_root)/../testing/gtest.gyp:gtest',
+        '<(webrtc_root)/../testing/gmock.gyp:gmock',
+      ],
+      'all_dependent_settings': {
+        'include_dirs': [
+          '.',
+        ],
+      },
+      'sources': [
+        'test_suite.cc',
+        'test_suite.h',
+        'testsupport/fileutils.h',
+        'testsupport/fileutils.cc',
+        'testsupport/frame_reader.h',
+        'testsupport/frame_reader.cc',
+        'testsupport/frame_writer.h',
+        'testsupport/frame_writer.cc',
+        'testsupport/packet_reader.h',
+        'testsupport/packet_reader.cc',
+        'testsupport/mock/mock_frame_reader.h',
+        'testsupport/mock/mock_frame_writer.h',
+      ],
+    },
+    {
+      # Depend on this target when you want to have test_support but also the
+      # main method needed for gtest to execute!
+      'target_name': 'test_support_main',
+      'type': 'static_library',
+      'dependencies': [
+        'test_support',
+      ],
+      'sources': [
+        'run_all_unittests.cc',
+      ],
+    },
+    {
+      'target_name': 'test_support_unittests',
+      'type': 'executable',
+      'dependencies': [
+        'test_support_main',
+        '<(webrtc_root)/../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'testsupport/unittest_utils.h',
+        'testsupport/fileutils_unittest.cc',
+        'testsupport/frame_reader_unittest.cc',
+        'testsupport/frame_writer_unittest.cc',
+        'testsupport/packet_reader_unittest.cc',
+      ],
+    },
+  ],
+}
diff --git a/test/test_suite.cc b/test/test_suite.cc
new file mode 100644
index 0000000..ac3f3a2
--- /dev/null
+++ b/test/test_suite.cc
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/test_suite.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace webrtc {
+namespace test {
+TestSuite::TestSuite(int argc, char** argv) {
+  testing::InitGoogleMock(&argc, argv);  // Runs InitGoogleTest() internally.
+}
+
+TestSuite::~TestSuite() {
+}
+
+int TestSuite::Run() {
+  Initialize();
+  int result = RUN_ALL_TESTS();
+  Shutdown();
+  return result;
+}
+
+void TestSuite::Initialize() {
+  // TODO(andrew): initialize singletons here (e.g. Trace).
+}
+
+void TestSuite::Shutdown() {
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/test_suite.h b/test/test_suite.h
new file mode 100644
index 0000000..f500daa
--- /dev/null
+++ b/test/test_suite.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_TEST_SUITE_H_
+#define TEST_TEST_SUITE_H_
+
+// Derived from Chromium's src/base/test/test_suite.h.
+
+// Defines a basic test suite framework for running gtest based tests.  You can
+// instantiate this class in your main function and call its Run method to run
+// any gtest based tests that are linked into your executable.
+
+#include "src/system_wrappers/interface/constructor_magic.h"
+
+namespace webrtc {
+namespace test {
+class TestSuite {
+ public:
+  TestSuite(int argc, char** argv);
+  virtual ~TestSuite();
+
+  int Run();
+
+ protected:
+  // Override these for custom initialization and shutdown handling.  Use these
+  // instead of putting complex code in your constructor/destructor.
+  virtual void Initialize();
+  virtual void Shutdown();
+
+  DISALLOW_COPY_AND_ASSIGN(TestSuite);
+};
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // TEST_TEST_SUITE_H_
diff --git a/test/testsupport/fileutils.cc b/test/testsupport/fileutils.cc
new file mode 100644
index 0000000..1e6bbca
--- /dev/null
+++ b/test/testsupport/fileutils.cc
@@ -0,0 +1,167 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/fileutils.h"
+
+#ifdef WIN32
+#include <direct.h>
+#define GET_CURRENT_DIR _getcwd
+#else
+#include <unistd.h>
+#define GET_CURRENT_DIR getcwd
+#endif
+
+#include <sys/stat.h>  // To check for directory existence.
+#ifndef S_ISDIR  // Not defined in stat.h on Windows.
+#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#endif
+
+#include <cstdio>
+
+#include "typedefs.h"  // For architecture defines
+
+namespace webrtc {
+namespace test {
+
+#ifdef WIN32
+static const char* kPathDelimiter = "\\";
+#else
+static const char* kPathDelimiter = "/";
+#endif
+// The file we're looking for to identify the project root dir.
+static const char* kProjectRootFileName = "DEPS";
+static const char* kOutputDirName = "out";
+static const char* kFallbackPath = "./";
+static const char* kResourcesDirName = "resources";
+const char* kCannotFindProjectRootDir = "ERROR_CANNOT_FIND_PROJECT_ROOT_DIR";
+
+std::string ProjectRootPath() {
+  std::string working_dir = WorkingDir();
+  if (working_dir == kFallbackPath) {
+    return kCannotFindProjectRootDir;
+  }
+  // Check for our file that verifies the root dir.
+  std::string current_path(working_dir);
+  FILE* file = NULL;
+  int path_delimiter_index = current_path.find_last_of(kPathDelimiter);
+  while (path_delimiter_index > -1) {
+    std::string root_filename = current_path + kPathDelimiter +
+        kProjectRootFileName;
+    file = fopen(root_filename.c_str(), "r");
+    if (file != NULL) {
+      fclose(file);
+      return current_path + kPathDelimiter;
+    }
+    // Move up one directory in the directory tree.
+    current_path = current_path.substr(0, path_delimiter_index);
+    path_delimiter_index = current_path.find_last_of(kPathDelimiter);
+  }
+  // Reached the root directory.
+  fprintf(stderr, "Cannot find project root directory!\n");
+  return kCannotFindProjectRootDir;
+}
+
+std::string OutputPath() {
+  std::string path = ProjectRootPath();
+  if (path == kCannotFindProjectRootDir) {
+    return kFallbackPath;
+  }
+  path += kOutputDirName;
+  if (!CreateDirectory(path)) {
+    return kFallbackPath;
+  }
+  return path + kPathDelimiter;
+}
+
+std::string WorkingDir() {
+  char path_buffer[FILENAME_MAX];
+  if (!GET_CURRENT_DIR(path_buffer, sizeof(path_buffer))) {
+    fprintf(stderr, "Cannot get current directory!\n");
+    return kFallbackPath;
+  } else {
+    return std::string(path_buffer);
+  }
+}
+
+bool CreateDirectory(std::string directory_name) {
+  struct stat path_info = {0};
+  // Check if the path exists already:
+  if (stat(directory_name.c_str(), &path_info) == 0) {
+    if (!S_ISDIR(path_info.st_mode)) {
+      fprintf(stderr, "Path %s exists but is not a directory! Remove this "
+              "file and re-run to create the directory.\n",
+              directory_name.c_str());
+      return false;
+    }
+  } else {
+#ifdef WIN32
+    return _mkdir(directory_name.c_str()) == 0;
+#else
+    return mkdir(directory_name.c_str(),  S_IRWXU | S_IRWXG | S_IRWXO) == 0;
+#endif
+  }
+  return true;
+}
+
+bool FileExists(std::string file_name) {
+  struct stat file_info = {0};
+  return stat(file_name.c_str(), &file_info) == 0;
+}
+
+std::string ResourcePath(std::string name, std::string extension) {
+  std::string platform = "win";
+#ifdef WEBRTC_LINUX
+  platform = "linux";
+#endif  // WEBRTC_LINUX
+#ifdef WEBRTC_MAC
+  platform = "mac";
+#endif  // WEBRTC_MAC
+
+#ifdef WEBRTC_ARCH_64_BITS
+  std::string architecture = "64";
+#else
+  std::string architecture = "32";
+#endif  // WEBRTC_ARCH_64_BITS
+
+  std::string resources_path = ProjectRootPath() + kResourcesDirName +
+      kPathDelimiter;
+  std::string resource_file = resources_path + name + "_" + platform + "_" +
+      architecture + "." + extension;
+  if (FileExists(resource_file)) {
+    return resource_file;
+  }
+  // Try without architecture.
+  resource_file = resources_path + name + "_" + platform + "." + extension;
+  if (FileExists(resource_file)) {
+    return resource_file;
+  }
+  // Try without platform.
+  resource_file = resources_path + name + "_" + architecture + "." + extension;
+  if (FileExists(resource_file)) {
+    return resource_file;
+  }
+  // Fall back on name without architecture or platform.
+  return resources_path + name + "." + extension;
+}
+
+size_t GetFileSize(std::string filename) {
+  FILE* f = fopen(filename.c_str(), "rb");
+  size_t size = 0;
+  if (f != NULL) {
+    if (fseek(f, 0, SEEK_END) == 0) {
+      size = ftell(f);
+    }
+    fclose(f);
+  }
+  return size;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/fileutils.h b/test/testsupport/fileutils.h
new file mode 100644
index 0000000..c89ac29
--- /dev/null
+++ b/test/testsupport/fileutils.h
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+
+// File utilities for testing purposes.
+//
+// The ProjectRootPath() method is a convenient way of getting an absolute
+// path to the project source tree root directory. Using this, it is easy to
+// refer to test resource files in a portable way.
+//
+// Notice that even if Windows platforms use backslash as path delimiter, it is
+// also supported to use slash, so there's no need for #ifdef checks in test
+// code for setting up the paths to the resource files.
+//
+// Example use:
+// Assume we have the following code being used in a test source file:
+// const std::string kInputFile = webrtc::test::ProjectRootPath() +
+//     "test/data/voice_engine/audio_long16.wav";
+// // Use the kInputFile for the tests...
+//
+// Then here's some example outputs for different platforms:
+// Linux:
+// * Source tree located in /home/user/webrtc/trunk
+// * Test project located in /home/user/webrtc/trunk/src/testproject
+// * Test binary compiled as:
+//   /home/user/webrtc/trunk/out/Debug/testproject_unittests
+// Then ProjectRootPath() will return /home/user/webrtc/trunk/ no matter if
+// the test binary is executed from standing in either of:
+// /home/user/webrtc/trunk
+// or
+// /home/user/webrtc/trunk/out/Debug
+// (or any other directory below the trunk for that matter).
+//
+// Windows:
+// * Source tree located in C:\Users\user\webrtc\trunk
+// * Test project located in C:\Users\user\webrtc\trunk\src\testproject
+// * Test binary compiled as:
+//   C:\Users\user\webrtc\trunk\src\testproject\Debug\testproject_unittests.exe
+// Then ProjectRootPath() will return C:\Users\user\webrtc\trunk\ when the
+// test binary is executed from inside Visual Studio.
+// It will also return the same path if the test is executed from a command
+// prompt standing in C:\Users\user\webrtc\trunk\src\testproject\Debug
+//
+// Mac:
+// * Source tree located in /Users/user/webrtc/trunk
+// * Test project located in /Users/user/webrtc/trunk/src/testproject
+// * Test binary compiled as:
+//   /Users/user/webrtc/trunk/xcodebuild/Debug/testproject_unittests
+// Then ProjectRootPath() will return /Users/user/webrtc/trunk/ no matter if
+// the test binary is executed from standing in either of:
+// /Users/user/webrtc/trunk
+// or
+// /Users/user/webrtc/trunk/out/Debug
+// (or any other directory below the trunk for that matter).
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_FILEUTILS_H_
+#define WEBRTC_TEST_TESTSUPPORT_FILEUTILS_H_
+
+#include <string>
+
+namespace webrtc {
+namespace test {
+
+// This is the "directory" returned if the ProjectRootPath() function fails
+// to find the project root.
+extern const char* kCannotFindProjectRootDir;
+
+// Finds the root dir of the project, to be able to set correct paths to
+// resource files used by tests.
+// The implementation is simple: it just looks for the file defined by
+// kProjectRootFileName, starting in the current directory (the working
+// directory) and then steps upward until it is found (or it is at the root of
+// the file system).
+// If the current working directory is above the project root dir, it will not
+// be found.
+//
+// If symbolic links occur in the path they will be resolved and the actual
+// directory will be returned.
+//
+// Returns the absolute path to the project root dir (usually the trunk dir)
+// WITH a trailing path delimiter.
+// If the project root is not found, the string specified by
+// kCannotFindProjectRootDir is returned.
+std::string ProjectRootPath();
+
+// Creates and returns the absolute path to the output directory where log files
+// and other test artifacts should be put. The output directory is always a
+// directory named "out" at the top-level of the project, i.e. a subfolder to
+// the path returned by ProjectRootPath().
+//
+// Details described for ProjectRootPath() apply here too.
+//
+// Returns the path WITH a trailing path delimiter. If the project root is not
+// found, the current working directory ("./") is returned as a fallback.
+std::string OutputPath();
+
+// Returns a path to a resource file for the currently executing platform.
+// Adapts to what filenames are currently present in the
+// [project-root]/resources/ dir.
+// Returns an absolute path according to this priority list (the directory
+// part of the path is left out for readability):
+// 1. [name]_[platform]_[architecture].[extension]
+// 2. [name]_[platform].[extension]
+// 3. [name]_[architecture].[extension]
+// 4. [name].[extension]
+// Where
+// * platform is either of "win", "mac" or "linux".
+// * architecture is either of "32" or "64".
+//
+// Arguments:
+//    name - Name of the resource file. If a plain filename (no directory path)
+//           is supplied, the file is assumed to be located in resources/
+//           If a directory path is prepended to the filename, a subdirectory
+//           hierarchy reflecting that path is assumed to be present.
+//    extension - File extension, without the dot, i.e. "bmp" or "yuv".
+std::string ResourcePath(std::string name, std::string extension);
+
+// Gets the current working directory for the executing program.
+// Returns "./" if for some reason it is not possible to find the working
+// directory.
+std::string WorkingDir();
+
+// Creates a directory if it does not already exist.
+// Returns true if successful. Will print an error message to stderr and return
+// false if a file with the same name already exists.
+bool CreateDirectory(std::string directory_name);
+
+// File size of the supplied file in bytes. Will return 0 if the file is
+// empty or if the file does not exist or is unreadable.
+size_t GetFileSize(std::string filename);
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_FILEUTILS_H_
diff --git a/test/testsupport/fileutils_unittest.cc b/test/testsupport/fileutils_unittest.cc
new file mode 100644
index 0000000..a500a07
--- /dev/null
+++ b/test/testsupport/fileutils_unittest.cc
@@ -0,0 +1,191 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/fileutils.h"
+
+#include <cstdio>
+#include <list>
+#include <string>
+
+#include "gtest/gtest.h"
+
+#ifdef WIN32
+static const char* kPathDelimiter = "\\";
+#else
+static const char* kPathDelimiter = "/";
+#endif
+
+static const std::string kDummyDir = "file_utils_unittest_dummy_dir";
+static const std::string kResourcesDir = "resources";
+static const std::string kTestName = "fileutils_unittest";
+static const std::string kExtension = "tmp";
+
+typedef std::list<std::string> FileList;
+
+namespace webrtc {
+
+// Test fixture to restore the working directory between each test, since some
+// of them change it with chdir during execution (not restored by the
+// gtest framework).
+class FileUtilsTest : public testing::Test {
+ protected:
+  FileUtilsTest() {
+  }
+  virtual ~FileUtilsTest() {}
+  // Runs before the first test
+  static void SetUpTestCase() {
+    original_working_dir_ = webrtc::test::WorkingDir();
+    std::string resources_path = original_working_dir_ + kPathDelimiter +
+        kResourcesDir + kPathDelimiter;
+    webrtc::test::CreateDirectory(resources_path);
+
+    files_.push_back(resources_path + kTestName + "." + kExtension);
+    files_.push_back(resources_path + kTestName + "_32." + kExtension);
+    files_.push_back(resources_path + kTestName + "_64." + kExtension);
+    files_.push_back(resources_path + kTestName + "_linux." + kExtension);
+    files_.push_back(resources_path + kTestName + "_mac." + kExtension);
+    files_.push_back(resources_path + kTestName + "_win." + kExtension);
+    files_.push_back(resources_path + kTestName + "_linux_32." + kExtension);
+    files_.push_back(resources_path + kTestName + "_mac_32." + kExtension);
+    files_.push_back(resources_path + kTestName + "_win_32." + kExtension);
+    files_.push_back(resources_path + kTestName + "_linux_64." + kExtension);
+    files_.push_back(resources_path + kTestName + "_mac_64." + kExtension);
+    files_.push_back(resources_path + kTestName + "_win_64." + kExtension);
+
+    // Now that the resources dir exists, write some empty test files into it.
+    for (FileList::iterator file_it = files_.begin();
+        file_it != files_.end(); ++file_it) {
+      FILE* file = fopen(file_it->c_str(), "wb");
+      ASSERT_TRUE(file != NULL) << "Failed to write file: " << file_it->c_str();
+      ASSERT_GT(fprintf(file, "%s",  "Dummy data"), 0);
+      fclose(file);
+    }
+    // Create a dummy subdir that can be chdir'ed into for testing purposes.
+    empty_dummy_dir_ = original_working_dir_ + kPathDelimiter + kDummyDir;
+    webrtc::test::CreateDirectory(empty_dummy_dir_);
+  }
+  static void TearDownTestCase() {
+    // Clean up all resource files written
+    for (FileList::iterator file_it = files_.begin();
+            file_it != files_.end(); ++file_it) {
+      remove(file_it->c_str());
+    }
+    std::remove(empty_dummy_dir_.c_str());
+  }
+  void SetUp() {
+    ASSERT_EQ(chdir(original_working_dir_.c_str()), 0);
+  }
+  void TearDown() {
+    ASSERT_EQ(chdir(original_working_dir_.c_str()), 0);
+  }
+ protected:
+  static FileList files_;
+  static std::string empty_dummy_dir_;
+ private:
+  static std::string original_working_dir_;
+};
+
+FileList FileUtilsTest::files_;
+std::string FileUtilsTest::original_working_dir_ = "";
+std::string FileUtilsTest::empty_dummy_dir_ = "";
+
+// Tests that the project root path is returned for the default working
+// directory that is automatically set when the test executable is launched.
+// The test is not fully testing the implementation, since we cannot be sure
+// of where the executable was launched from.
+// The test will fail if the top level directory is not named "trunk".
+TEST_F(FileUtilsTest, ProjectRootPathFromUnchangedWorkingDir) {
+  std::string path = webrtc::test::ProjectRootPath();
+  std::string expected_end = "trunk";
+  expected_end = kPathDelimiter + expected_end + kPathDelimiter;
+  ASSERT_EQ(path.length() - expected_end.length(), path.find(expected_end));
+}
+
+// Similar to the above test, but for the output dir
+TEST_F(FileUtilsTest, OutputPathFromUnchangedWorkingDir) {
+  std::string path = webrtc::test::OutputPath();
+  std::string expected_end = "out";
+  expected_end = kPathDelimiter + expected_end + kPathDelimiter;
+  ASSERT_EQ(path.length() - expected_end.length(), path.find(expected_end));
+}
+
+// Tests setting the current working directory to a directory three levels
+// deeper from the current one. Then testing that the project path returned
+// is still the same, when the function under test is called again.
+TEST_F(FileUtilsTest, ProjectRootPathFromDeeperWorkingDir) {
+  std::string path = webrtc::test::ProjectRootPath();
+  std::string original_working_dir = path;  // This is the correct project root
+  // Change to a subdirectory path.
+  ASSERT_EQ(0, chdir(empty_dummy_dir_.c_str()));
+  ASSERT_EQ(original_working_dir, webrtc::test::ProjectRootPath());
+}
+
+// Similar to the above test, but for the output dir
+TEST_F(FileUtilsTest, OutputPathFromDeeperWorkingDir) {
+  std::string path = webrtc::test::OutputPath();
+  std::string original_working_dir = path;
+  ASSERT_EQ(0, chdir(empty_dummy_dir_.c_str()));
+  ASSERT_EQ(original_working_dir, webrtc::test::OutputPath());
+}
+
+// Tests with current working directory set to a directory higher up in the
+// directory tree than the project root dir. This case shall return a specified
+// error string as a directory (which will be an invalid path).
+TEST_F(FileUtilsTest, ProjectRootPathFromRootWorkingDir) {
+  // Change current working dir to the root of the current file system
+  // (this will always be "above" our project root dir).
+  ASSERT_EQ(0, chdir(kPathDelimiter));
+  ASSERT_EQ(webrtc::test::kCannotFindProjectRootDir,
+            webrtc::test::ProjectRootPath());
+}
+
+// Similar to the above test, but for the output dir
+TEST_F(FileUtilsTest, OutputPathFromRootWorkingDir) {
+  ASSERT_EQ(0, chdir(kPathDelimiter));
+  ASSERT_EQ("./", webrtc::test::OutputPath());
+}
+
+// Only tests that the code executes
+TEST_F(FileUtilsTest, CreateDirectory) {
+  std::string directory = "fileutils-unittest-empty-dir";
+  // Make sure it's removed if a previous test has failed:
+  std::remove(directory.c_str());
+  ASSERT_TRUE(webrtc::test::CreateDirectory(directory));
+  std::remove(directory.c_str());
+}
+
+TEST_F(FileUtilsTest, WorkingDirReturnsValue) {
+  // Hard to cover all platforms. Just test that it returns something without
+  // crashing:
+  std::string working_dir = webrtc::test::WorkingDir();
+  ASSERT_GT(working_dir.length(), 0u);
+}
+
+// Due to multiple platforms, it is hard to make a complete test for
+// ResourcePath. Manual testing has been performed by removing files and
+// verifying that the result conforms to the documentation specified for the
+// function.
+TEST_F(FileUtilsTest, ResourcePathReturnsValue) {
+  std::string resource = webrtc::test::ResourcePath(kTestName, kExtension);
+  ASSERT_GT(resource.find(kTestName), 0u);
+  ASSERT_GT(resource.find(kExtension), 0u);
+  ASSERT_EQ(0, chdir(kPathDelimiter));
+  ASSERT_EQ("./", webrtc::test::OutputPath());
+}
+
+TEST_F(FileUtilsTest, GetFileSizeExistingFile) {
+  ASSERT_GT(webrtc::test::GetFileSize(files_.front()), 0u);
+}
+
+TEST_F(FileUtilsTest, GetFileSizeNonExistingFile) {
+  ASSERT_EQ(0u, webrtc::test::GetFileSize("non-existing-file.tmp"));
+}
+
+}  // namespace webrtc
diff --git a/test/testsupport/frame_reader.cc b/test/testsupport/frame_reader.cc
new file mode 100644
index 0000000..b05ea58
--- /dev/null
+++ b/test/testsupport/frame_reader.cc
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/frame_reader.h"
+
+#include <cassert>
+
+#include "testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+FrameReaderImpl::FrameReaderImpl(std::string input_filename,
+                                 int frame_length_in_bytes)
+    : input_filename_(input_filename),
+      frame_length_in_bytes_(frame_length_in_bytes),
+      input_file_(NULL) {
+}
+
+FrameReaderImpl::~FrameReaderImpl() {
+  Close();
+}
+
+bool FrameReaderImpl::Init() {
+  if (frame_length_in_bytes_ <= 0) {
+    fprintf(stderr, "Frame length must be >0, was %d\n",
+            frame_length_in_bytes_);
+    return false;
+  }
+  input_file_ = fopen(input_filename_.c_str(), "rb");
+  if (input_file_ == NULL) {
+    fprintf(stderr, "Couldn't open input file for reading: %s\n",
+            input_filename_.c_str());
+    return false;
+  }
+  // Calculate total number of frames.
+  size_t source_file_size = GetFileSize(input_filename_);
+  if (source_file_size <= 0u) {
+    fprintf(stderr, "Found empty file: %s\n", input_filename_.c_str());
+    return false;
+  }
+  number_of_frames_ = source_file_size / frame_length_in_bytes_;
+  return true;
+}
+
+void FrameReaderImpl::Close() {
+  if (input_file_ != NULL) {
+    fclose(input_file_);
+    input_file_ = NULL;
+  }
+}
+
+bool FrameReaderImpl::ReadFrame(WebRtc_UWord8* source_buffer) {
+  assert(source_buffer);
+  if (input_file_ == NULL) {
+    fprintf(stderr, "FrameReader is not initialized (input file is NULL)\n");
+    return false;
+  }
+  size_t nbr_read = fread(source_buffer, 1, frame_length_in_bytes_,
+                          input_file_);
+  if (nbr_read != static_cast<unsigned int>(frame_length_in_bytes_) &&
+      ferror(input_file_)) {
+    fprintf(stderr, "Error reading from input file: %s\n",
+            input_filename_.c_str());
+    return false;
+  }
+  if (feof(input_file_) != 0) {
+    return false;  // No more frames to process.
+  }
+  return true;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/frame_reader.h b/test/testsupport/frame_reader.h
new file mode 100644
index 0000000..56d8fc4
--- /dev/null
+++ b/test/testsupport/frame_reader.h
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_FRAME_READER_H_
+#define WEBRTC_TEST_TESTSUPPORT_FRAME_READER_H_
+
+#include <cstdio>
+#include <string>
+
+#include "typedefs.h"
+
+namespace webrtc {
+namespace test {
+
+// Handles reading of frames from video files.
+class FrameReader {
+ public:
+  virtual ~FrameReader() {}
+
+  // Initializes the frame reader, i.e. opens the input file.
+  // This must be called before reading of frames has started.
+  // Returns false if an error has occurred, in addition to printing to stderr.
+  virtual bool Init() = 0;
+
+  // Reads a frame into the supplied buffer, which must contain enough space
+  // for the frame size.
+  // Returns true if there are more frames to read, false if we've already
+  // read the last frame (in the previous call).
+  virtual bool ReadFrame(WebRtc_UWord8* source_buffer) = 0;
+
+  // Closes the input file if open. Essentially makes this class impossible
+  // to use anymore. Will also be invoked by the destructor.
+  virtual void Close() = 0;
+
+  // Frame length in bytes of a single frame image.
+  virtual int FrameLength() = 0;
+  // Total number of frames in the input video source.
+  virtual int NumberOfFrames() = 0;
+};
+
+class FrameReaderImpl : public FrameReader {
+ public:
+  // Creates a file handler. The input file is assumed to exist and be readable.
+  // Parameters:
+  //   input_filename          The file to read from.
+  //   frame_length_in_bytes   The size of each frame.
+  //                           For YUV this is 3 * width * height / 2
+  FrameReaderImpl(std::string input_filename, int frame_length_in_bytes);
+  virtual ~FrameReaderImpl();
+  bool Init();
+  bool ReadFrame(WebRtc_UWord8* source_buffer);
+  void Close();
+  int FrameLength() { return frame_length_in_bytes_; }
+  int NumberOfFrames() { return number_of_frames_; }
+
+ private:
+  std::string input_filename_;
+  int frame_length_in_bytes_;
+  int number_of_frames_;
+  FILE* input_file_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_FRAME_READER_H_
diff --git a/test/testsupport/frame_reader_unittest.cc b/test/testsupport/frame_reader_unittest.cc
new file mode 100644
index 0000000..f1da5ce
--- /dev/null
+++ b/test/testsupport/frame_reader_unittest.cc
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/frame_reader.h"
+
+#include "gtest/gtest.h"
+#include "testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+const std::string kInputFilename = "temp_inputfile.tmp";
+const std::string kInputFileContents = "baz";
+// Setting kFrameLength to a value much larger than the size of the test
+// file causes the ReadFrame test to fail on Windows.
+const int kFrameLength = 1000;
+
+class FrameReaderTest: public testing::Test {
+ protected:
+  FrameReaderTest() {}
+  virtual ~FrameReaderTest() {}
+  void SetUp() {
+    // Cleanup any previous dummy input file.
+    std::remove(kInputFilename.c_str());
+
+    // Create a dummy input file.
+    FILE* dummy = fopen(kInputFilename.c_str(), "wb");
+    fprintf(dummy, "%s", kInputFileContents.c_str());
+    fclose(dummy);
+
+    frame_reader_ = new FrameReaderImpl(kInputFilename, kFrameLength);
+    ASSERT_TRUE(frame_reader_->Init());
+  }
+  void TearDown() {
+    delete frame_reader_;
+    // Cleanup the dummy input file.
+    std::remove(kInputFilename.c_str());
+  }
+  FrameReader* frame_reader_;
+};
+
+TEST_F(FrameReaderTest, InitSuccess) {
+  FrameReaderImpl frame_reader(kInputFilename, kFrameLength);
+  ASSERT_TRUE(frame_reader.Init());
+  ASSERT_EQ(kFrameLength, frame_reader.FrameLength());
+  ASSERT_EQ(0, frame_reader.NumberOfFrames());
+}
+
+TEST_F(FrameReaderTest, ReadFrame) {
+  WebRtc_UWord8 buffer[3];
+  bool result = frame_reader_->ReadFrame(buffer);
+  ASSERT_FALSE(result);  // No more frames to read.
+  ASSERT_EQ(kInputFileContents[0], buffer[0]);
+  ASSERT_EQ(kInputFileContents[1], buffer[1]);
+  ASSERT_EQ(kInputFileContents[2], buffer[2]);
+}
+
+TEST_F(FrameReaderTest, ReadFrameUninitialized) {
+  WebRtc_UWord8 buffer[3];
+  FrameReaderImpl file_reader(kInputFilename, kFrameLength);
+  ASSERT_FALSE(file_reader.ReadFrame(buffer));
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/frame_writer.cc b/test/testsupport/frame_writer.cc
new file mode 100644
index 0000000..5f32539
--- /dev/null
+++ b/test/testsupport/frame_writer.cc
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/frame_writer.h"
+
+#include <cassert>
+
+namespace webrtc {
+namespace test {
+
+FrameWriterImpl::FrameWriterImpl(std::string output_filename,
+                                 int frame_length_in_bytes)
+    : output_filename_(output_filename),
+      frame_length_in_bytes_(frame_length_in_bytes),
+      output_file_(NULL) {
+}
+
+FrameWriterImpl::~FrameWriterImpl() {
+  Close();
+}
+
+bool FrameWriterImpl::Init() {
+  if (frame_length_in_bytes_ <= 0) {
+    fprintf(stderr, "Frame length must be >0, was %d\n",
+            frame_length_in_bytes_);
+    return false;
+  }
+  output_file_ = fopen(output_filename_.c_str(), "wb");
+  if (output_file_ == NULL) {
+    fprintf(stderr, "Couldn't open output file for writing: %s\n",
+            output_filename_.c_str());
+    return false;
+  }
+  return true;
+}
+
+void FrameWriterImpl::Close() {
+  if (output_file_ != NULL) {
+    fclose(output_file_);
+    output_file_ = NULL;
+  }
+}
+
+bool FrameWriterImpl::WriteFrame(WebRtc_UWord8* frame_buffer) {
+  assert(frame_buffer);
+  if (output_file_ == NULL) {
+    fprintf(stderr, "FrameWriter is not initialized (output file is NULL)\n");
+    return false;
+  }
+  int bytes_written = fwrite(frame_buffer, 1, frame_length_in_bytes_,
+                             output_file_);
+  if (bytes_written != frame_length_in_bytes_) {
+    fprintf(stderr, "Failed to write %d bytes to file %s\n",
+            frame_length_in_bytes_, output_filename_.c_str());
+    return false;
+  }
+  return true;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/frame_writer.h b/test/testsupport/frame_writer.h
new file mode 100644
index 0000000..abc5d35
--- /dev/null
+++ b/test/testsupport/frame_writer.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_FRAME_WRITER_H_
+#define WEBRTC_TEST_TESTSUPPORT_FRAME_WRITER_H_
+
+#include <cstdio>
+#include <string>
+
+#include "typedefs.h"
+
+namespace webrtc {
+namespace test {
+
+// Handles writing of video files.
+class FrameWriter {
+ public:
+  virtual ~FrameWriter() {}
+
+  // Initializes the file handler, i.e. opens the input and output files etc.
+  // This must be called before reading or writing frames has started.
+  // Returns false if an error has occurred, in addition to printing to stderr.
+  virtual bool Init() = 0;
+
+  // Writes a frame of the configured frame length to the output file.
+  // Returns true if the write was successful, false otherwise.
+  virtual bool WriteFrame(WebRtc_UWord8* frame_buffer) = 0;
+
+  // Closes the output file if open. Essentially makes this class impossible
+  // to use anymore. Will also be invoked by the destructor.
+  virtual void Close() = 0;
+
+  // Frame length in bytes of a single frame image.
+  virtual int FrameLength() = 0;
+};
+
+class FrameWriterImpl : public FrameWriter {
+ public:
+  // Creates a file handler for writing. The output file must be writable
+  // (it is created, or overwritten, when Init() is called).
+  // Parameters:
+  //   output_filename         The file to write. Will be overwritten if already
+  //                           existing.
+  //   frame_length_in_bytes   The size of each frame.
+  //                           For YUV: 3*width*height/2
+  FrameWriterImpl(std::string output_filename, int frame_length_in_bytes);
+  virtual ~FrameWriterImpl();
+  bool Init();
+  bool WriteFrame(WebRtc_UWord8* frame_buffer);
+  void Close();
+  int FrameLength() { return frame_length_in_bytes_; }
+
+ private:
+  std::string output_filename_;
+  int frame_length_in_bytes_;
+  int number_of_frames_;
+  FILE* output_file_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_FRAME_WRITER_H_
diff --git a/test/testsupport/frame_writer_unittest.cc b/test/testsupport/frame_writer_unittest.cc
new file mode 100644
index 0000000..d25d1d2
--- /dev/null
+++ b/test/testsupport/frame_writer_unittest.cc
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/frame_writer.h"
+
+#include "gtest/gtest.h"
+#include "testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+const std::string kOutputFilename = "temp_outputfile.tmp";
+const int kFrameLength = 1000;
+
+class FrameWriterTest: public testing::Test {
+ protected:
+  FrameWriterTest() {}
+  virtual ~FrameWriterTest() {}
+  void SetUp() {
+    // Cleanup any previous output file.
+    std::remove(kOutputFilename.c_str());
+    frame_writer_ = new FrameWriterImpl(kOutputFilename, kFrameLength);
+    ASSERT_TRUE(frame_writer_->Init());
+  }
+  void TearDown() {
+    delete frame_writer_;
+    // Cleanup the temporary file.
+    std::remove(kOutputFilename.c_str());
+  }
+  FrameWriter* frame_writer_;
+};
+
+TEST_F(FrameWriterTest, InitSuccess) {
+  FrameWriterImpl frame_writer(kOutputFilename, kFrameLength);
+  ASSERT_TRUE(frame_writer.Init());
+  ASSERT_EQ(kFrameLength, frame_writer.FrameLength());
+}
+
+TEST_F(FrameWriterTest, WriteFrame) {
+  WebRtc_UWord8 buffer[kFrameLength];
+  memset(buffer, 9, kFrameLength);  // Write lots of 9s to the buffer
+  bool result = frame_writer_->WriteFrame(buffer);
+  ASSERT_TRUE(result);  // success
+  // Close the file and verify the size.
+  frame_writer_->Close();
+  ASSERT_EQ(kFrameLength,
+            static_cast<int>(GetFileSize(kOutputFilename)));
+}
+
+TEST_F(FrameWriterTest, WriteFrameUninitialized) {
+  WebRtc_UWord8 buffer[3];
+  FrameWriterImpl frame_writer(kOutputFilename, kFrameLength);
+  ASSERT_FALSE(frame_writer.WriteFrame(buffer));
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/metrics/video_metrics.cc b/test/testsupport/metrics/video_metrics.cc
new file mode 100644
index 0000000..9e61ec8
--- /dev/null
+++ b/test/testsupport/metrics/video_metrics.cc
@@ -0,0 +1,187 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/metrics/video_metrics.h"
+
+#include <algorithm> // min_element, max_element
+#include <cassert>
+#include <cstdio>
+
+#include "common_video/libyuv/include/libyuv.h"
+
+namespace webrtc {
+namespace test {
+
+// Used for calculating min and max values
+static bool LessForFrameResultValue (const FrameResult& s1,
+                                     const FrameResult& s2) {
+    return s1.value < s2.value;
+}
+
+enum VideoMetricsType { kPSNR, kSSIM, kBoth };
+
+// Calculates metrics for a frame and adds statistics to the result for it.
+void CalculateFrame(VideoMetricsType video_metrics_type,
+                    uint8_t* ref,
+                    uint8_t* test,
+                    int width,
+                    int height,
+                    int frame_number,
+                    QualityMetricsResult* result) {
+  FrameResult frame_result;
+  frame_result.frame_number = frame_number;
+  switch (video_metrics_type) {
+    case kPSNR:
+      frame_result.value = I420PSNR(ref, test, width, height);
+      break;
+    case kSSIM:
+      frame_result.value = I420SSIM(ref, test, width, height);
+      break;
+    default:
+      assert(false);
+  }
+  result->frames.push_back(frame_result);
+}
+
+// Calculates average, min and max values for the supplied struct, if non-NULL.
+void CalculateStats(QualityMetricsResult* result) {
+  if (result == NULL || result->frames.size() == 0) {
+    return;
+  }
+  // Calculate average
+  std::vector<FrameResult>::iterator iter;
+  double metrics_values_sum = 0.0;
+  for (iter = result->frames.begin(); iter != result->frames.end(); ++iter) {
+    metrics_values_sum += iter->value;
+  }
+  result->average = metrics_values_sum / result->frames.size();
+
+  // Calculate min/max statistics
+  iter = min_element(result->frames.begin(), result->frames.end(),
+                     LessForFrameResultValue);
+  result->min = iter->value;
+  result->min_frame_number = iter->frame_number;
+  iter = max_element(result->frames.begin(), result->frames.end(),
+                     LessForFrameResultValue);
+  result->max = iter->value;
+  result->max_frame_number = iter->frame_number;
+}
+
+// Single method that handles all combinations of video metrics calculation, to
+// minimize code duplication. Either psnr_result or ssim_result may be NULL,
+// depending on which VideoMetricsType is targeted.
+int CalculateMetrics(VideoMetricsType video_metrics_type,
+                     const char* ref_filename,
+                     const char* test_filename,
+                     int width,
+                     int height,
+                     QualityMetricsResult* psnr_result,
+                     QualityMetricsResult* ssim_result) {
+  assert(ref_filename != NULL);
+  assert(test_filename != NULL);
+  assert(width > 0);
+  assert(height > 0);
+
+  FILE* ref_fp = fopen(ref_filename, "rb");
+  if (ref_fp == NULL) {
+    // cannot open reference file
+    fprintf(stderr, "Cannot open file %s\n", ref_filename);
+    return -1;
+  }
+  FILE* test_fp = fopen(test_filename, "rb");
+  if (test_fp == NULL) {
+    // cannot open test file
+    fprintf(stderr, "Cannot open file %s\n", test_filename);
+    fclose(ref_fp);
+    return -2;
+  }
+  int frame_number = 0;
+
+  // Allocating size for one I420 frame.
+  const int frame_length = 3 * width * height >> 1;
+  uint8_t* ref = new uint8_t[frame_length];
+  uint8_t* test = new uint8_t[frame_length];
+
+  int ref_bytes = fread(ref, 1, frame_length, ref_fp);
+  int test_bytes = fread(test, 1, frame_length, test_fp);
+  while (ref_bytes == frame_length && test_bytes == frame_length) {
+    switch (video_metrics_type) {
+      case kPSNR:
+        CalculateFrame(kPSNR, ref, test, width, height, frame_number,
+                       psnr_result);
+        break;
+      case kSSIM:
+        CalculateFrame(kSSIM, ref, test, width, height, frame_number,
+                       ssim_result);
+        break;
+      case kBoth:
+        CalculateFrame(kPSNR, ref, test, width, height, frame_number,
+                       psnr_result);
+        CalculateFrame(kSSIM, ref, test, width, height, frame_number,
+                       ssim_result);
+        break;
+      default:
+        assert(false);
+    }
+    frame_number++;
+    ref_bytes = fread(ref, 1, frame_length, ref_fp);
+    test_bytes = fread(test, 1, frame_length, test_fp);
+  }
+  int return_code = 0;
+  if (frame_number == 0) {
+    fprintf(stderr, "Tried to measure video metrics from empty files "
+            "(reference file: %s  test file: %s)\n", ref_filename,
+            test_filename);
+    return_code = -3;
+  } else {
+    CalculateStats(psnr_result);
+    CalculateStats(ssim_result);
+  }
+  delete [] ref;
+  delete [] test;
+  fclose(ref_fp);
+  fclose(test_fp);
+  return return_code;
+}
+
+int I420MetricsFromFiles(const char* ref_filename,
+                         const char* test_filename,
+                         int width,
+                         int height,
+                         QualityMetricsResult* psnr_result,
+                         QualityMetricsResult* ssim_result) {
+  assert(psnr_result != NULL);
+  assert(ssim_result != NULL);
+  return CalculateMetrics(kBoth, ref_filename, test_filename, width, height,
+                          psnr_result, ssim_result);
+}
+
+int I420PSNRFromFiles(const char* ref_filename,
+                      const char* test_filename,
+                      int width,
+                      int height,
+                      QualityMetricsResult* result) {
+  assert(result != NULL);
+  return CalculateMetrics(kPSNR, ref_filename, test_filename, width, height,
+                          result, NULL);
+}
+
+int I420SSIMFromFiles(const char* ref_filename,
+                      const char* test_filename,
+                      int width,
+                      int height,
+                      QualityMetricsResult* result) {
+  assert(result != NULL);
+  return CalculateMetrics(kSSIM, ref_filename, test_filename, width, height,
+                          NULL, result);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/metrics/video_metrics.h b/test/testsupport/metrics/video_metrics.h
new file mode 100644
index 0000000..df11a49
--- /dev/null
+++ b/test/testsupport/metrics/video_metrics.h
@@ -0,0 +1,112 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TESTSUPPORT_METRICS_VIDEO_METRICS_H_
+#define WEBRTC_TESTSUPPORT_METRICS_VIDEO_METRICS_H_
+
+#include <limits>
+#include <vector>
+
+namespace webrtc {
+namespace test {
+
+// Contains video quality metrics result for a single frame.
+struct FrameResult {
+  int frame_number;
+  double value;
+};
+
+// Result from a PSNR/SSIM calculation operation.
+// The frames in this data structure are 0-indexed.
+struct QualityMetricsResult {
+  QualityMetricsResult() :
+    average(0.0),
+    min(std::numeric_limits<double>::max()),
+    max(std::numeric_limits<double>::min()),
+    min_frame_number(-1),
+    max_frame_number(-1)
+  {};
+  double average;
+  double min;
+  double max;
+  int min_frame_number;
+  int max_frame_number;
+  std::vector<FrameResult> frames;
+};
+
+// Calculates PSNR and SSIM values for the reference and test video files
+// (must be in I420 format). All calculated values are filled into the
+// QualityMetricsResult structs.
+// PSNR values have the unit decibel (dB) where a high value means the test file
+// is similar to the reference file. The higher value, the more similar.
+// For more info about PSNR, see http://en.wikipedia.org/wiki/PSNR
+// SSIM values range between -1.0 and 1.0, where 1.0 means the files are
+// identical. For more info about SSIM, see http://en.wikipedia.org/wiki/SSIM
+// This function only compares video frames up to the point when the shortest
+// video ends.
+// Return value:
+//  0 if successful, negative on errors:
+// -1 if the source file cannot be opened
+// -2 if the test file cannot be opened
+// -3 if any of the files are empty
+// -4 if any arguments are invalid (currently enforced with asserts, not a return code).
+int I420MetricsFromFiles(const char* ref_filename,
+                         const char* test_filename,
+                         int width,
+                         int height,
+                         QualityMetricsResult* psnr_result,
+                         QualityMetricsResult* ssim_result);
+
+// Calculates PSNR values for the reference and test video files (must be in
+// I420 format). All calculated values are filled into the QualityMetricsResult
+// struct.
+// PSNR values have the unit decibel (dB) where a high value means the test file
+// is similar to the reference file. The higher value, the more similar.
+// This function only compares video frames up to the point when the shortest
+// video ends.
+// For more info about PSNR, see http://en.wikipedia.org/wiki/PSNR
+//
+// Return value:
+//  0 if successful, negative on errors:
+// -1 if the source file cannot be opened
+// -2 if the test file cannot be opened
+// -3 if any of the files are empty
+// -4 if any arguments are invalid (currently enforced with asserts, not a return code).
+int I420PSNRFromFiles(const char* ref_filename,
+                      const char* test_filename,
+                      int width,
+                      int height,
+                      QualityMetricsResult* result);
+
+// Calculates SSIM values for the reference and test video files (must be in
+// I420 format). All calculated values are filled into the QualityMetricsResult
+// struct.
+// SSIM values range between -1.0 and 1.0, where 1.0 means the files are
+// identical.
+// This function only compares video frames up to the point when the shortest
+// video ends.
+// For more info about SSIM, see http://en.wikipedia.org/wiki/SSIM
+//
+// Return value:
+//  0 if successful, negative on errors:
+// -1 if the source file cannot be opened
+// -2 if the test file cannot be opened
+// -3 if any of the files are empty
+// -4 if any arguments are invalid (currently enforced with asserts, not a return code).
+int I420SSIMFromFiles(const char* ref_filename,
+                      const char* test_filename,
+                      int width,
+                      int height,
+                      QualityMetricsResult* result);
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif // WEBRTC_TESTSUPPORT_METRICS_VIDEO_METRICS_H_
diff --git a/test/testsupport/metrics/video_metrics_unittest.cc b/test/testsupport/metrics/video_metrics_unittest.cc
new file mode 100644
index 0000000..e77dbff
--- /dev/null
+++ b/test/testsupport/metrics/video_metrics_unittest.cc
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/metrics/video_metrics.h"
+
+#include "gtest/gtest.h"
+#include "testsupport/fileutils.h"
+
+namespace webrtc {
+
+static const char* kEmptyFileName = "video_metrics_unittest_empty_file.tmp";
+static const char* kNonExistingFileName = "video_metrics_unittest_non_existing";
+static const int kWidth = 352;
+static const int kHeight = 288;
+
+static const int kMissingReferenceFileReturnCode = -1;
+static const int kMissingTestFileReturnCode = -2;
+static const int kEmptyFileReturnCode = -3;
+static const double kPsnrPerfectResult =  48.0;
+static const double kSsimPerfectResult = 1.0;
+
+class VideoMetricsTest: public testing::Test {
+ protected:
+  VideoMetricsTest() {
+    video_file_ = webrtc::test::ResourcePath("foreman_cif_short", "yuv");
+  }
+  virtual ~VideoMetricsTest() {}
+  void SetUp() {
+    // Create an empty file:
+    FILE* dummy = fopen(kEmptyFileName, "wb");
+    fclose(dummy);
+  }
+  void TearDown() {
+    std::remove(kEmptyFileName);
+  }
+  webrtc::test::QualityMetricsResult psnr_result_;
+  webrtc::test::QualityMetricsResult ssim_result_;
+  std::string video_file_;
+};
+
+// Tests that it is possible to run with the same reference as test file
+TEST_F(VideoMetricsTest, ReturnsPerfectResultForIdenticalFilesPSNR) {
+  EXPECT_EQ(0, I420PSNRFromFiles(video_file_.c_str(), video_file_.c_str(),
+                                 kWidth, kHeight, &psnr_result_));
+  EXPECT_EQ(kPsnrPerfectResult, psnr_result_.average);
+}
+
+TEST_F(VideoMetricsTest, ReturnsPerfectResultForIdenticalFilesSSIM) {
+  EXPECT_EQ(0, I420SSIMFromFiles(video_file_.c_str(), video_file_.c_str(),
+                                 kWidth, kHeight, &ssim_result_));
+  EXPECT_EQ(kSsimPerfectResult, ssim_result_.average);
+}
+
+TEST_F(VideoMetricsTest, ReturnsPerfectResultForIdenticalFilesBothMetrics) {
+  EXPECT_EQ(0, I420MetricsFromFiles(video_file_.c_str(), video_file_.c_str(),
+                                    kWidth, kHeight, &psnr_result_,
+                                    &ssim_result_));
+  EXPECT_EQ(kPsnrPerfectResult, psnr_result_.average);
+  EXPECT_EQ(kSsimPerfectResult, ssim_result_.average);
+}
+
+// Tests that the right return code is given when the reference file is missing.
+TEST_F(VideoMetricsTest, MissingReferenceFilePSNR) {
+  EXPECT_EQ(kMissingReferenceFileReturnCode,
+            I420PSNRFromFiles(kNonExistingFileName, video_file_.c_str(),
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, MissingReferenceFileSSIM) {
+  EXPECT_EQ(kMissingReferenceFileReturnCode,
+            I420SSIMFromFiles(kNonExistingFileName, video_file_.c_str(),
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, MissingReferenceFileBothMetrics) {
+  EXPECT_EQ(kMissingReferenceFileReturnCode,
+            I420MetricsFromFiles(kNonExistingFileName, video_file_.c_str(),
+                                 kWidth, kHeight,
+                                 &psnr_result_, &ssim_result_));
+}
+
+// Tests that the right return code is given when the test file is missing.
+TEST_F(VideoMetricsTest, MissingTestFilePSNR) {
+  EXPECT_EQ(kMissingTestFileReturnCode,
+            I420PSNRFromFiles(video_file_.c_str(), kNonExistingFileName,
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, MissingTestFileSSIM) {
+  EXPECT_EQ(kMissingTestFileReturnCode,
+            I420SSIMFromFiles(video_file_.c_str(), kNonExistingFileName,
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, MissingTestFileBothMetrics) {
+  EXPECT_EQ(kMissingTestFileReturnCode,
+            I420MetricsFromFiles(video_file_.c_str(), kNonExistingFileName,
+                                 kWidth, kHeight,
+                                 &psnr_result_, &ssim_result_));
+}
+
+// Tests that the method can be executed with empty files.
+TEST_F(VideoMetricsTest, EmptyFilesPSNR) {
+  EXPECT_EQ(kEmptyFileReturnCode,
+            I420PSNRFromFiles(kEmptyFileName, video_file_.c_str(),
+                              kWidth, kHeight, &ssim_result_));
+  EXPECT_EQ(kEmptyFileReturnCode,
+            I420PSNRFromFiles(video_file_.c_str(), kEmptyFileName,
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, EmptyFilesSSIM) {
+  EXPECT_EQ(kEmptyFileReturnCode,
+            I420SSIMFromFiles(kEmptyFileName, video_file_.c_str(),
+                              kWidth, kHeight, &ssim_result_));
+  EXPECT_EQ(kEmptyFileReturnCode,
+            I420SSIMFromFiles(video_file_.c_str(), kEmptyFileName,
+                              kWidth, kHeight, &ssim_result_));
+}
+
+TEST_F(VideoMetricsTest, EmptyFilesBothMetrics) {
+  EXPECT_EQ(kEmptyFileReturnCode,
+            I420MetricsFromFiles(kEmptyFileName, video_file_.c_str(),
+                                 kWidth, kHeight,
+                                 &psnr_result_, &ssim_result_));
+  EXPECT_EQ(kEmptyFileReturnCode,
+              I420MetricsFromFiles(video_file_.c_str(), kEmptyFileName,
+                                   kWidth, kHeight,
+                                   &psnr_result_, &ssim_result_));
+}
+
+}  // namespace webrtc
diff --git a/test/testsupport/mock/mock_frame_reader.h b/test/testsupport/mock/mock_frame_reader.h
new file mode 100644
index 0000000..ecfc13c
--- /dev/null
+++ b/test/testsupport/mock/mock_frame_reader.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_READER_H_
+#define WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_READER_H_
+
+#include "testsupport/frame_reader.h"
+
+#include "gmock/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockFrameReader : public FrameReader {
+ public:
+  MOCK_METHOD0(Init, bool());
+  MOCK_METHOD1(ReadFrame, bool(WebRtc_UWord8* source_buffer));
+  MOCK_METHOD0(Close, void());
+  MOCK_METHOD0(FrameLength, int());
+  MOCK_METHOD0(NumberOfFrames, int());
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_READER_H_
diff --git a/test/testsupport/mock/mock_frame_writer.h b/test/testsupport/mock/mock_frame_writer.h
new file mode 100644
index 0000000..ba79184
--- /dev/null
+++ b/test/testsupport/mock/mock_frame_writer.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_WRITER_H_
+#define WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_WRITER_H_
+
+#include "testsupport/frame_writer.h"
+
+#include "gmock/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockFrameWriter : public FrameWriter {
+ public:
+  MOCK_METHOD0(Init, bool());
+  MOCK_METHOD1(WriteFrame, bool(WebRtc_UWord8* frame_buffer));
+  MOCK_METHOD0(Close, void());
+  MOCK_METHOD0(FrameLength, int());
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_MOCK_MOCK_FRAME_WRITER_H_
diff --git a/test/testsupport/packet_reader.cc b/test/testsupport/packet_reader.cc
new file mode 100644
index 0000000..e8859d1
--- /dev/null
+++ b/test/testsupport/packet_reader.cc
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/packet_reader.h"
+
+#include <cassert>
+#include <cstdio>
+
+namespace webrtc {
+namespace test {
+
+PacketReader::PacketReader()
+    : initialized_(false) {}
+
+PacketReader::~PacketReader() {}
+
+void PacketReader::InitializeReading(WebRtc_UWord8* data,
+                                     int data_length_in_bytes,
+                                     int packet_size_in_bytes) {
+  assert(data);
+  assert(data_length_in_bytes >= 0);
+  assert(packet_size_in_bytes > 0);
+  data_ = data;
+  data_length_ = data_length_in_bytes;
+  packet_size_ = packet_size_in_bytes;
+  currentIndex_ = 0;
+  initialized_ = true;
+}
+
+int PacketReader::NextPacket(WebRtc_UWord8** packet_pointer) {
+  if (!initialized_) {
+    fprintf(stderr, "Attempting to use uninitialized PacketReader!\n");
+    return -1;
+  }
+  *packet_pointer = data_ + currentIndex_;
+  // Check if we're about to read the last packet:
+  if (data_length_ - currentIndex_ <= packet_size_) {
+    int size = data_length_ - currentIndex_;
+    currentIndex_ = data_length_;
+    assert(size >= 0);
+    return size;
+  }
+  currentIndex_ += packet_size_;
+  assert(packet_size_ >= 0);
+  return packet_size_;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/packet_reader.h b/test/testsupport/packet_reader.h
new file mode 100644
index 0000000..4cb0bb1
--- /dev/null
+++ b/test/testsupport/packet_reader.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_PACKET_READER_H_
+#define WEBRTC_TEST_TESTSUPPORT_PACKET_READER_H_
+
+#include "typedefs.h"
+
+namespace webrtc {
+namespace test {
+
+// Reads chunks of data to simulate network packets from a byte array.
+class PacketReader {
+ public:
+  PacketReader();
+  virtual ~PacketReader();
+
+  // Initializes a new reading operation. Must be done before invoking the
+  // NextPacket method.
+  // * data_length_in_bytes is the length of the data byte array. Must be >= 0.
+  //   A length of 0 will result in no packets being read.
+  // * packet_size_in_bytes is the number of bytes to read in each NextPacket
+  //   method call. Must be > 0.
+  virtual void InitializeReading(WebRtc_UWord8* data, int data_length_in_bytes,
+                                 int packet_size_in_bytes);
+
+  // Moves the supplied pointer to the beginning of the next packet.
+  // Returns:
+  // *  The size of the packet ready to read (lower than the packet size for
+  //    the last packet)
+  // *  0 if there are no more packets to read
+  // * -1 if InitializeReading has not been called (also prints to stderr).
+  virtual int NextPacket(WebRtc_UWord8** packet_pointer);
+
+ private:
+  WebRtc_UWord8* data_;
+  int data_length_;
+  int packet_size_;
+  int currentIndex_;
+  bool initialized_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_PACKET_READER_H_
diff --git a/test/testsupport/packet_reader_unittest.cc b/test/testsupport/packet_reader_unittest.cc
new file mode 100644
index 0000000..6719e4c
--- /dev/null
+++ b/test/testsupport/packet_reader_unittest.cc
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/packet_reader.h"
+
+#include "gtest/gtest.h"
+#include "testsupport/unittest_utils.h"
+
+namespace webrtc {
+namespace test {
+
+class PacketReaderTest: public PacketRelatedTest {
+ protected:
+  PacketReaderTest() {}
+  virtual ~PacketReaderTest() {}
+  void SetUp() {
+    reader_ = new PacketReader();
+  }
+  void TearDown() {
+    delete reader_;
+  }
+  void VerifyPacketData(int expected_length,
+                        int actual_length,
+                        WebRtc_UWord8* original_data_pointer,
+                        WebRtc_UWord8* new_data_pointer) {
+    EXPECT_EQ(expected_length, actual_length);
+    EXPECT_EQ(*original_data_pointer, *new_data_pointer);
+    EXPECT_EQ(0, memcmp(original_data_pointer, new_data_pointer,
+                        actual_length));
+  }
+  PacketReader* reader_;
+};
+
+// Test lack of initialization
+TEST_F(PacketReaderTest, Uninitialized) {
+  WebRtc_UWord8* data_pointer = NULL;
+  EXPECT_EQ(-1, reader_->NextPacket(&data_pointer));
+  EXPECT_EQ(NULL, data_pointer);
+}
+
+TEST_F(PacketReaderTest, InitializeZeroLengthArgument) {
+  reader_->InitializeReading(packet_data_, 0, kPacketSizeInBytes);
+  ASSERT_EQ(0, reader_->NextPacket(&packet_data_pointer_));
+}
+
+// Test with something smaller than one packet
+TEST_F(PacketReaderTest, NormalSmallData) {
+  const int kDataLengthInBytes = 1499;
+  WebRtc_UWord8 data[kDataLengthInBytes];
+  WebRtc_UWord8* data_pointer = data;
+  memset(data, 1, kDataLengthInBytes);
+
+  reader_->InitializeReading(data, kDataLengthInBytes, kPacketSizeInBytes);
+  int length_to_read = reader_->NextPacket(&data_pointer);
+  VerifyPacketData(kDataLengthInBytes, length_to_read, data, data_pointer);
+  EXPECT_EQ(0, data_pointer - data);  // pointer hasn't moved
+
+  // Reading another one shall result in 0 bytes:
+  length_to_read = reader_->NextPacket(&data_pointer);
+  EXPECT_EQ(0, length_to_read);
+  EXPECT_EQ(kDataLengthInBytes, data_pointer - data);
+}
+
+// Test with data length that exactly matches one packet
+TEST_F(PacketReaderTest, NormalOnePacketData) {
+  WebRtc_UWord8 data[kPacketSizeInBytes];
+  WebRtc_UWord8* data_pointer = data;
+  memset(data, 1, kPacketSizeInBytes);
+
+  reader_->InitializeReading(data, kPacketSizeInBytes, kPacketSizeInBytes);
+  int length_to_read = reader_->NextPacket(&data_pointer);
+  VerifyPacketData(kPacketSizeInBytes, length_to_read, data, data_pointer);
+  EXPECT_EQ(0, data_pointer - data);  // pointer hasn't moved
+
+  // Reading another one shall result in 0 bytes:
+  length_to_read = reader_->NextPacket(&data_pointer);
+  EXPECT_EQ(0, length_to_read);
+  EXPECT_EQ(kPacketSizeInBytes, data_pointer - data);
+}
+
+// Test with data length that will result in 3 packets
+TEST_F(PacketReaderTest, NormalLargeData) {
+  reader_->InitializeReading(packet_data_, kPacketDataLength,
+                             kPacketSizeInBytes);
+
+  int length_to_read = reader_->NextPacket(&packet_data_pointer_);
+  VerifyPacketData(kPacketSizeInBytes, length_to_read,
+                   packet1_, packet_data_pointer_);
+
+  length_to_read = reader_->NextPacket(&packet_data_pointer_);
+  VerifyPacketData(kPacketSizeInBytes, length_to_read,
+                   packet2_, packet_data_pointer_);
+
+  length_to_read = reader_->NextPacket(&packet_data_pointer_);
+  VerifyPacketData(1u, length_to_read,
+                   packet3_, packet_data_pointer_);
+
+  // Reading another one shall result in 0 bytes:
+  length_to_read = reader_->NextPacket(&packet_data_pointer_);
+  EXPECT_EQ(0, length_to_read);
+  EXPECT_EQ(kPacketDataLength, packet_data_pointer_ - packet_data_);
+}
+
+// Test with empty data.
+TEST_F(PacketReaderTest, EmptyData) {
+  const int kDataLengthInBytes = 0;
+  WebRtc_UWord8* data = new WebRtc_UWord8[kDataLengthInBytes];
+  reader_->InitializeReading(data, kDataLengthInBytes, kPacketSizeInBytes);
+  EXPECT_EQ(kDataLengthInBytes, reader_->NextPacket(&data));
+  // Do it again to make sure nothing changes
+  EXPECT_EQ(kDataLengthInBytes, reader_->NextPacket(&data));
+  delete[] data;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/test/testsupport/unittest_utils.h b/test/testsupport/unittest_utils.h
new file mode 100644
index 0000000..963a5d3
--- /dev/null
+++ b/test/testsupport/unittest_utils.h
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_TEST_TESTSUPPORT_UNITTEST_UTILS_H_
+#define WEBRTC_TEST_TESTSUPPORT_UNITTEST_UTILS_H_
+
+namespace webrtc {
+namespace test {
+
+const int kPacketSizeInBytes = 1500;
+const int kPacketDataLength = kPacketSizeInBytes * 2 + 1;
+const int kPacketDataNumberOfPackets = 3;
+
+// A base test fixture for packet related tests. Contains
+// two fully prepared packets with 1s, 2s in their data and a third packet with
+// a single 3 in it (size=1).
+// A packet data structure is also available, that contains these three packets
+// in order.
+class PacketRelatedTest: public testing::Test {
+ protected:
+  // Three packet byte arrays with data used for verification:
+  WebRtc_UWord8 packet1_[kPacketSizeInBytes];
+  WebRtc_UWord8 packet2_[kPacketSizeInBytes];
+  WebRtc_UWord8 packet3_[1];
+  // Construct a data structure containing these packets
+  WebRtc_UWord8 packet_data_[kPacketDataLength];
+  WebRtc_UWord8* packet_data_pointer_;
+
+  PacketRelatedTest() {
+    packet_data_pointer_ = packet_data_;
+
+    memset(packet1_, 1, kPacketSizeInBytes);
+    memset(packet2_, 2, kPacketSizeInBytes);
+    memset(packet3_, 3, 1);
+    // Fill the packet_data:
+    memcpy(packet_data_pointer_, packet1_, kPacketSizeInBytes);
+    memcpy(packet_data_pointer_ + kPacketSizeInBytes, packet2_,
+           kPacketSizeInBytes);
+    memcpy(packet_data_pointer_ + kPacketSizeInBytes * 2, packet3_, 1);
+  }
+  virtual ~PacketRelatedTest() {}
+  void SetUp() {
+    // Initialize the random generator with 0 to get deterministic behavior
+    srand(0);
+  }
+  void TearDown() {}
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // WEBRTC_TEST_TESTSUPPORT_UNITTEST_UTILS_H_