CTS Verifier loopback: use noise algorithm

Replace the old latency measurement code with a new implementation.
It now uses AAudio instead of OpenSL ES.
It also uses a random noise burst instead of the "Larsen Effect".
Now the result will be more consistent and work at almost any volume level.

In a later CL, the code in the "/analyzer/" folder will be pulled in
from "/external/oboe/" instead of being duplicated here. We will also
use Oboe to test latency on OpenSL ES.

Bug: 148606576
Test: insert loopback adapter
Test: mmma cts/apps/CtsVerifier
Test: adb install -r -g $OUT/data/app/CtsVerifier/CtsVerifier.apk
Test: launch CTS Verifier
Test: tap "Audio Loopback Latency Test"
Change-Id: If8fdf259385b4521e834100c800016ff9ba2268e
diff --git a/apps/CtsVerifier/jni/audio_loopback/Android.bp b/apps/CtsVerifier/jni/audio_loopback/Android.bp
index 6709776..635de74 100644
--- a/apps/CtsVerifier/jni/audio_loopback/Android.bp
+++ b/apps/CtsVerifier/jni/audio_loopback/Android.bp
@@ -1,23 +1,24 @@
 cc_test_library {
     name: "libaudioloopback_jni",
     srcs: [
-        "sles.cpp",
-        "jni_sles.c",
-        "audio_utils/atomic.c",
-        "audio_utils/fifo.c",
-        "audio_utils/roundup.c",
+        "jni-bridge.cpp",
+        "NativeAudioAnalyzer.cpp",
+    ],
+    include_dirs: [
+        "frameworks/av/media/ndk/include",
+        "system/core/include/cutils",
     ],
     shared_libs: [
-        "libOpenSLES",
+        "libaaudio",
         "liblog",
     ],
+    stl: "libc++_static",
     ldflags: ["-Wl,--hash-style=sysv"],
     cflags: [
-        "-DSTDC_HEADERS",
         "-Werror",
         "-Wall",
         // For slCreateEngine
         "-Wno-deprecated",
     ],
-    sdk_version: "23",
+    sdk_version: "current",
 }
diff --git a/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.cpp b/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.cpp
new file mode 100644
index 0000000..d8d6946
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeAudioAnalyzer.h"
+
+static void convertPcm16ToFloat(const int16_t *source,
+                                float *destination,
+                                int32_t numSamples) {
+    constexpr float scaler = 1.0f / 32768.0f;
+    for (int i = 0; i < numSamples; i++) {
+        destination[i] = source[i] * scaler;
+    }
+}
+
+// Fill the audio output buffer.
+int32_t NativeAudioAnalyzer::readFormattedData(int32_t numFrames) {
+    int32_t framesRead = AAUDIO_ERROR_INVALID_FORMAT;
+    if (mActualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+        framesRead = AAudioStream_read(mInputStream, mInputShortData,
+                                       numFrames,
+                                       0 /* timeoutNanoseconds */);
+    } else if (mActualInputFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+        framesRead = AAudioStream_read(mInputStream, mInputFloatData,
+                                       numFrames,
+                                       0 /* timeoutNanoseconds */);
+    } else {
+        ALOGE("ERROR actualInputFormat = %d\n", mActualInputFormat);
+        assert(false);
+    }
+    if (framesRead < 0) {
+        // Expect INVALID_STATE if STATE_STARTING
+        if (mFramesReadTotal > 0) {
+            mInputError = framesRead;
+            ALOGE("ERROR in read = %d = %s\n", framesRead,
+                   AAudio_convertResultToText(framesRead));
+        } else {
+            framesRead = 0;
+        }
+    } else {
+        mFramesReadTotal += framesRead;
+    }
+    return framesRead;
+}
+
+aaudio_data_callback_result_t NativeAudioAnalyzer::dataCallbackProc(
+        void *audioData,
+        int32_t numFrames
+) {
+    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+    float  *outputData = (float  *) audioData;
+
+    // Read audio data from the input stream.
+    int32_t actualFramesRead;
+
+    if (numFrames > mInputFramesMaximum) {
+        ALOGE("%s() numFrames:%d > mInputFramesMaximum:%d", __func__, numFrames, mInputFramesMaximum);
+        mInputError = AAUDIO_ERROR_OUT_OF_RANGE;
+        return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    if (numFrames > mMaxNumFrames) {
+        mMaxNumFrames = numFrames;
+    }
+    if (numFrames < mMinNumFrames) {
+        mMinNumFrames = numFrames;
+    }
+
+    // Silence the output.
+    int32_t numBytes = numFrames * mActualOutputChannelCount * sizeof(float);
+    memset(audioData, 0 /* value */, numBytes);
+
+    if (mNumCallbacksToDrain > 0) {
+        // Drain the input FIFOs.
+        int32_t totalFramesRead = 0;
+        do {
+            actualFramesRead = readFormattedData(numFrames);
+            if (actualFramesRead > 0) {
+                totalFramesRead += actualFramesRead;
+            } else if (actualFramesRead < 0) {
+                callbackResult = AAUDIO_CALLBACK_RESULT_STOP;
+            }
+            // Ignore errors because input stream may not be started yet.
+        } while (actualFramesRead > 0);
+        // Only counts if we actually got some data.
+        if (totalFramesRead > 0) {
+            mNumCallbacksToDrain--;
+        }
+
+    } else if (mNumCallbacksToNotRead > 0) {
+        // Let the input fill up a bit so we are not so close to the write pointer.
+        mNumCallbacksToNotRead--;
+    } else if (mNumCallbacksToDiscard > 0) {
+        // Ignore. Allow the input to fill back up to equilibrium with the output.
+        actualFramesRead = readFormattedData(numFrames);
+        if (actualFramesRead < 0) {
+            callbackResult = AAUDIO_CALLBACK_RESULT_STOP;
+        }
+        mNumCallbacksToDiscard--;
+
+    } else {
+        // The full duplex stream is now stable so process the audio.
+        int32_t numInputBytes = numFrames * mActualInputChannelCount * sizeof(float);
+        memset(mInputFloatData, 0 /* value */, numInputBytes);
+
+        int64_t inputFramesWritten = AAudioStream_getFramesWritten(mInputStream);
+        int64_t inputFramesRead = AAudioStream_getFramesRead(mInputStream);
+        int64_t framesAvailable = inputFramesWritten - inputFramesRead;
+
+        // Read the INPUT data.
+        actualFramesRead = readFormattedData(numFrames); // READ
+        if (actualFramesRead < 0) {
+            callbackResult = AAUDIO_CALLBACK_RESULT_STOP;
+        } else {
+            if (actualFramesRead < numFrames) {
+                if(actualFramesRead < (int32_t) framesAvailable) {
+                    ALOGE("insufficient for no reason, numFrames = %d"
+                                   ", actualFramesRead = %d"
+                                   ", inputFramesWritten = %d"
+                                   ", inputFramesRead = %d"
+                                   ", available = %d\n",
+                           numFrames,
+                           actualFramesRead,
+                           (int) inputFramesWritten,
+                           (int) inputFramesRead,
+                           (int) framesAvailable);
+                }
+                mInsufficientReadCount++;
+                mInsufficientReadFrames += numFrames - actualFramesRead; // deficit
+                // ALOGE("Error insufficientReadCount = %d\n",(int)mInsufficientReadCount);
+            }
+
+            int32_t numSamples = actualFramesRead * mActualInputChannelCount;
+
+            if (mActualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+                convertPcm16ToFloat(mInputShortData, mInputFloatData, numSamples);
+            }
+
+            // Process the INPUT and generate the OUTPUT.
+            mLoopbackProcessor->process(mInputFloatData,
+                                               mActualInputChannelCount,
+                                               numFrames,
+                                               outputData,
+                                               mActualOutputChannelCount,
+                                               numFrames);
+
+            mIsDone = mLoopbackProcessor->isDone();
+            if (mIsDone) {
+                callbackResult = AAUDIO_CALLBACK_RESULT_STOP;
+            }
+        }
+    }
+    mFramesWrittenTotal += numFrames;
+
+    return callbackResult;
+}
+
+static aaudio_data_callback_result_t s_MyDataCallbackProc(
+        AAudioStream * /* outputStream */,
+        void *userData,
+        void *audioData,
+        int32_t numFrames) {
+    NativeAudioAnalyzer *myData = (NativeAudioAnalyzer *) userData;
+    return myData->dataCallbackProc(audioData, numFrames);
+}
+
+static void s_MyErrorCallbackProc(
+        AAudioStream * /* stream */,
+        void * userData,
+        aaudio_result_t error) {
+    ALOGE("Error Callback, error: %d\n",(int)error);
+    NativeAudioAnalyzer *myData = (NativeAudioAnalyzer *) userData;
+    myData->mOutputError = error;
+}
+
+bool NativeAudioAnalyzer::isRecordingComplete() {
+    return mPulseLatencyAnalyzer.isRecordingComplete();
+}
+
+int NativeAudioAnalyzer::analyze() {
+    mPulseLatencyAnalyzer.analyze();
+    return getError(); // TODO review
+}
+
+double NativeAudioAnalyzer::getLatencyMillis() {
+    return mPulseLatencyAnalyzer.getMeasuredLatency() * 1000.0 / 48000;
+}
+
+double NativeAudioAnalyzer::getConfidence() {
+    return mPulseLatencyAnalyzer.getMeasuredConfidence();
+}
+
+aaudio_result_t NativeAudioAnalyzer::openAudio() {
+    AAudioStreamBuilder *builder = nullptr;
+
+    mLoopbackProcessor = &mPulseLatencyAnalyzer; // for latency test
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudio_createStreamBuilder() returned %s",
+               AAudio_convertResultToText(result));
+        return result;
+    }
+
+    // Create the OUTPUT stream -----------------------
+    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
+    AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+    AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_EXCLUSIVE);
+    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
+    AAudioStreamBuilder_setChannelCount(builder, 2); // stereo
+    AAudioStreamBuilder_setDataCallback(builder, s_MyDataCallbackProc, this);
+    AAudioStreamBuilder_setErrorCallback(builder, s_MyErrorCallbackProc, this);
+
+    result = AAudioStreamBuilder_openStream(builder, &mOutputStream);
+    if (result != AAUDIO_OK) {
+        ALOGE("NativeAudioAnalyzer::openAudio() OUTPUT error %s",
+               AAudio_convertResultToText(result));
+        return result;
+    }
+
+    int32_t outputFramesPerBurst = AAudioStream_getFramesPerBurst(mOutputStream);
+    (void) AAudioStream_setBufferSizeInFrames(mOutputStream, outputFramesPerBurst * kDefaultOutputSizeBursts);
+
+    int32_t outputSampleRate = AAudioStream_getSampleRate(mOutputStream);
+    mActualOutputChannelCount = AAudioStream_getChannelCount(mOutputStream);
+
+    // Create the INPUT stream -----------------------
+    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
+    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_UNSPECIFIED);
+    AAudioStreamBuilder_setSampleRate(builder, outputSampleRate); // must match
+    AAudioStreamBuilder_setChannelCount(builder, 1); // mono
+    AAudioStreamBuilder_setDataCallback(builder, nullptr, nullptr);
+    AAudioStreamBuilder_setErrorCallback(builder, nullptr, nullptr);
+    result = AAudioStreamBuilder_openStream(builder, &mInputStream);
+    if (result != AAUDIO_OK) {
+        ALOGE("NativeAudioAnalyzer::openAudio() INPUT error %s",
+               AAudio_convertResultToText(result));
+        return result;
+    }
+
+    int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(mInputStream);
+    (void) AAudioStream_setBufferSizeInFrames(mInputStream, actualCapacity);
+
+    // ------- Setup loopbackData -----------------------------
+    mActualInputFormat = AAudioStream_getFormat(mInputStream);
+    mActualInputChannelCount = AAudioStream_getChannelCount(mInputStream);
+
+    // Allocate a buffer for the audio data.
+    mInputFramesMaximum = 32 * AAudioStream_getFramesPerBurst(mInputStream);
+
+    if (mActualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+        mInputShortData = new int16_t[mInputFramesMaximum * mActualInputChannelCount]{};
+    }
+    mInputFloatData = new float[mInputFramesMaximum * mActualInputChannelCount]{};
+
+    return result;
+}
+
+aaudio_result_t NativeAudioAnalyzer::startAudio() {
+    mLoopbackProcessor->prepareToTest();
+
+    // Start OUTPUT first so INPUT does not overflow.
+    aaudio_result_t result = AAudioStream_requestStart(mOutputStream);
+    if (result != AAUDIO_OK) {
+        stopAudio();
+        return result;
+    }
+
+    result = AAudioStream_requestStart(mInputStream);
+    if (result != AAUDIO_OK) {
+        stopAudio();
+        return result;
+    }
+
+    return result;
+}
+
+aaudio_result_t NativeAudioAnalyzer::stopAudio() {
+    aaudio_result_t result1 = AAUDIO_OK;
+    aaudio_result_t result2 = AAUDIO_OK;
+    ALOGD("stopAudio() , minNumFrames = %d, maxNumFrames = %d\n", mMinNumFrames, mMaxNumFrames);
+    // Stop OUTPUT first because it uses INPUT.
+    if (mOutputStream != nullptr) {
+        result1 = AAudioStream_requestStop(mOutputStream);
+    }
+
+    // Stop INPUT.
+    if (mInputStream != nullptr) {
+        result2 = AAudioStream_requestStop(mInputStream);
+    }
+    return result1 != AAUDIO_OK ? result1 : result2;
+}
+
+aaudio_result_t NativeAudioAnalyzer::closeAudio() {
+    aaudio_result_t result1 = AAUDIO_OK;
+    aaudio_result_t result2 = AAUDIO_OK;
+    // Stop and close OUTPUT first because it uses INPUT.
+    if (mOutputStream != nullptr) {
+        result1 = AAudioStream_close(mOutputStream);
+        mOutputStream = nullptr;
+    }
+
+    // Stop and close INPUT.
+    if (mInputStream != nullptr) {
+        result2 = AAudioStream_close(mInputStream);
+        mInputStream = nullptr;
+    }
+    return result1 != AAUDIO_OK ? result1 : result2;
+}
diff --git a/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.h b/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.h
new file mode 100644
index 0000000..0d9c64b
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/NativeAudioAnalyzer.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CTS_NATIVE_AUDIO_ANALYZER_H
+#define CTS_NATIVE_AUDIO_ANALYZER_H
+
+#define LOG_TAG "NativeAudioAnalyzer"
+#include <android/log.h>
+
+#ifndef MODULE_NAME
+#define MODULE_NAME  "NativeAudioAnalyzer"
+#endif
+
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, MODULE_NAME, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, MODULE_NAME, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, MODULE_NAME, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, MODULE_NAME, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, MODULE_NAME, __VA_ARGS__)
+#define ALOGF(...) __android_log_print(ANDROID_LOG_FATAL, MODULE_NAME, __VA_ARGS__)
+
+#include <aaudio/AAudio.h>
+
+#include "analyzer/GlitchAnalyzer.h"
+#include "analyzer/LatencyAnalyzer.h"
+
+class NativeAudioAnalyzer {
+public:
+
+    /**
+     * Open the audio input and output streams.
+     * @return AAUDIO_OK or negative error
+     */
+    aaudio_result_t openAudio();
+
+    /**
+     * Start the audio input and output streams.
+     * @return AAUDIO_OK or negative error
+     */
+    aaudio_result_t startAudio();
+
+    /**
+     * Stop the audio input and output streams.
+     * @return AAUDIO_OK or negative error
+     */
+    aaudio_result_t stopAudio();
+
+    /**
+     * Close the audio input and output streams.
+     * @return AAUDIO_OK or negative error
+     */
+    aaudio_result_t closeAudio();
+
+    /**
+     * @return true if enough audio input has been recorded
+     */
+    bool isRecordingComplete();
+
+    /**
+     * Analyze the input and measure the latency between output and input.
+     * @return AAUDIO_OK or negative error
+     */
+    int analyze();
+
+    /**
+     * @return the measured latency in milliseconds
+     */
+    double getLatencyMillis();
+
+    /**
+     * The confidence is based on a normalized correlation.
+     * It ranges from 0.0 to 1.0. Higher is better.
+     *
+     * @return the confidence in the latency result
+     */
+    double getConfidence();
+
+    aaudio_result_t getError() {
+        return mInputError ? mInputError : mOutputError;
+    }
+
+    AAudioStream      *mInputStream = nullptr;
+    AAudioStream      *mOutputStream = nullptr;
+    aaudio_format_t    mActualInputFormat = AAUDIO_FORMAT_INVALID;
+    int16_t           *mInputShortData = nullptr;
+    float             *mInputFloatData = nullptr;
+
+    aaudio_result_t    mInputError = AAUDIO_OK;
+    aaudio_result_t    mOutputError = AAUDIO_OK;
+
+aaudio_data_callback_result_t dataCallbackProc(
+        void *audioData,
+        int32_t numFrames);
+
+private:
+
+    int32_t readFormattedData(int32_t numFrames);
+
+    GlitchAnalyzer       mSineAnalyzer;
+    PulseLatencyAnalyzer mPulseLatencyAnalyzer;
+    LoopbackProcessor   *mLoopbackProcessor;
+
+    int32_t            mInputFramesMaximum = 0;
+    int32_t            mActualInputChannelCount = 0;
+    int32_t            mActualOutputChannelCount = 0;
+    int32_t            mNumCallbacksToDrain = kNumCallbacksToDrain;
+    int32_t            mNumCallbacksToNotRead = kNumCallbacksToNotRead;
+    int32_t            mNumCallbacksToDiscard = kNumCallbacksToDiscard;
+    int32_t            mMinNumFrames = INT32_MAX;
+    int32_t            mMaxNumFrames = 0;
+    int32_t            mInsufficientReadCount = 0;
+    int32_t            mInsufficientReadFrames = 0;
+    int32_t            mFramesReadTotal = 0;
+    int32_t            mFramesWrittenTotal = 0;
+    bool               mIsDone = false;
+
+    static constexpr int kLogPeriodMillis         = 1000;
+    static constexpr int kNumInputChannels        = 1;
+    static constexpr int kNumCallbacksToDrain     = 20;
+    static constexpr int kNumCallbacksToNotRead   = 0; // let input fill back up
+    static constexpr int kNumCallbacksToDiscard   = 20;
+    static constexpr int kDefaultHangTimeMillis   = 50;
+    static constexpr int kMaxGlitchEventsToSave   = 32;
+    static constexpr int kDefaultOutputSizeBursts = 2;
+};
+
+#endif // CTS_NATIVE_AUDIO_ANALYZER_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/GlitchAnalyzer.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/GlitchAnalyzer.h
new file mode 100644
index 0000000..0adcd6e
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/GlitchAnalyzer.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_GLITCH_ANALYZER_H
+#define ANALYZER_GLITCH_ANALYZER_H
+
+#include <algorithm>
+#include <cctype>
+#include <iomanip>
+#include <iostream>
+
+#include "LatencyAnalyzer.h"
+#include "PseudoRandom.h"
+
+/**
+ * Output a steady sine wave and analyze the return signal.
+ *
+ * Use a cosine transform to measure the predicted magnitude and relative phase of the
+ * looped back sine wave. Then generate a predicted signal and compare with the actual signal.
+ */
+class GlitchAnalyzer : public LoopbackProcessor {
+public:
+
+    int32_t getState() const {
+        return mState;
+    }
+
+    double getPeakAmplitude() const {
+        return mPeakFollower.getLevel();
+    }
+
+    double getTolerance() {
+        return mTolerance;
+    }
+
+    void setTolerance(double tolerance) {
+        mTolerance = tolerance;
+        mScaledTolerance = mMagnitude * mTolerance;
+    }
+
+    void setMagnitude(double magnitude) {
+        mMagnitude = magnitude;
+        mScaledTolerance = mMagnitude * mTolerance;
+    }
+
+    int32_t getGlitchCount() const {
+        return mGlitchCount;
+    }
+
+    int32_t getStateFrameCount(int state) const {
+        return mStateFrameCounters[state];
+    }
+
+    double getSignalToNoiseDB() {
+        static const double threshold = 1.0e-14;
+        if (mMeanSquareSignal < threshold || mMeanSquareNoise < threshold) {
+            return 0.0;
+        } else {
+            double signalToNoise = mMeanSquareSignal / mMeanSquareNoise; // power ratio
+            double signalToNoiseDB = 10.0 * log(signalToNoise);
+            if (signalToNoiseDB < MIN_SNR_DB) {
+                ALOGD("ERROR - signal to noise ratio is too low! < %d dB. Adjust volume.",
+                     MIN_SNR_DB);
+                setResult(ERROR_VOLUME_TOO_LOW);
+            }
+            return signalToNoiseDB;
+        }
+    }
+
+    std::string analyze() override {
+        std::stringstream report;
+        report << "GlitchAnalyzer ------------------\n";
+        report << LOOPBACK_RESULT_TAG "peak.amplitude     = " << std::setw(8)
+               << getPeakAmplitude() << "\n";
+        report << LOOPBACK_RESULT_TAG "sine.magnitude     = " << std::setw(8)
+               << mMagnitude << "\n";
+        report << LOOPBACK_RESULT_TAG "rms.noise          = " << std::setw(8)
+               << mMeanSquareNoise << "\n";
+        report << LOOPBACK_RESULT_TAG "signal.to.noise.db = " << std::setw(8)
+               << getSignalToNoiseDB() << "\n";
+        report << LOOPBACK_RESULT_TAG "frames.accumulated = " << std::setw(8)
+               << mFramesAccumulated << "\n";
+        report << LOOPBACK_RESULT_TAG "sine.period        = " << std::setw(8)
+               << mSinePeriod << "\n";
+        report << LOOPBACK_RESULT_TAG "test.state         = " << std::setw(8)
+               << mState << "\n";
+        report << LOOPBACK_RESULT_TAG "frame.count        = " << std::setw(8)
+               << mFrameCounter << "\n";
+        // Did we ever get a lock?
+        bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
+        if (!gotLock) {
+            report << "ERROR - failed to lock on reference sine tone.\n";
+            setResult(ERROR_NO_LOCK);
+        } else {
+            // Only print if meaningful.
+            report << LOOPBACK_RESULT_TAG "glitch.count       = " << std::setw(8)
+                   << mGlitchCount << "\n";
+            report << LOOPBACK_RESULT_TAG "max.glitch         = " << std::setw(8)
+                   << mMaxGlitchDelta << "\n";
+            if (mGlitchCount > 0) {
+                report << "ERROR - number of glitches > 0\n";
+                setResult(ERROR_GLITCHES);
+            }
+        }
+        return report.str();
+    }
+
+    void printStatus() override {
+        ALOGD("st = %d, #gl = %3d,", mState, mGlitchCount);
+    }
+    /**
+     * Calculate the magnitude of the component of the input signal
+     * that matches the analysis frequency.
+     * Also calculate the phase that we can use to create a
+     * signal that matches that component.
+     * The phase will be between -PI and +PI.
+     */
+    double calculateMagnitude(double *phasePtr = nullptr) {
+        if (mFramesAccumulated == 0) {
+            return 0.0;
+        }
+        double sinMean = mSinAccumulator / mFramesAccumulated;
+        double cosMean = mCosAccumulator / mFramesAccumulated;
+        double magnitude = 2.0 * sqrt((sinMean * sinMean) + (cosMean * cosMean));
+        if (phasePtr != nullptr) {
+            double phase = M_PI_2 - atan2(sinMean, cosMean);
+            *phasePtr = phase;
+        }
+        return magnitude;
+    }
+
+    /**
+     * @param frameData contains microphone data with sine signal feedback
+     * @param channelCount
+     */
+    result_code processInputFrame(float *frameData, int /* channelCount */) override {
+        result_code result = RESULT_OK;
+
+        float sample = frameData[0];
+        float peak = mPeakFollower.process(sample);
+
+        // Force a periodic glitch to test the detector!
+        if (mForceGlitchDuration > 0) {
+            if (mForceGlitchCounter == 0) {
+                ALOGE("%s: force a glitch!!", __func__);
+                mForceGlitchCounter = getSampleRate();
+            } else if (mForceGlitchCounter <= mForceGlitchDuration) {
+                // Force an abrupt offset.
+                sample += (sample > 0.0) ? -0.5f : 0.5f;
+            }
+            --mForceGlitchCounter;
+        }
+
+        mStateFrameCounters[mState]++; // count how many frames we are in each state
+
+        switch (mState) {
+            case STATE_IDLE:
+                mDownCounter--;
+                if (mDownCounter <= 0) {
+                    mState = STATE_IMMUNE;
+                    mDownCounter = IMMUNE_FRAME_COUNT;
+                    mInputPhase = 0.0; // prevent spike at start
+                    mOutputPhase = 0.0;
+                }
+                break;
+
+            case STATE_IMMUNE:
+                mDownCounter--;
+                if (mDownCounter <= 0) {
+                    mState = STATE_WAITING_FOR_SIGNAL;
+                }
+                break;
+
+            case STATE_WAITING_FOR_SIGNAL:
+                if (peak > mThreshold) {
+                    mState = STATE_WAITING_FOR_LOCK;
+                    //ALOGD("%5d: switch to STATE_WAITING_FOR_LOCK", mFrameCounter);
+                    resetAccumulator();
+                }
+                break;
+
+            case STATE_WAITING_FOR_LOCK:
+                mSinAccumulator += sample * sinf(mInputPhase);
+                mCosAccumulator += sample * cosf(mInputPhase);
+                mFramesAccumulated++;
+                // Must be a multiple of the period or the calculation will not be accurate.
+                if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
+                    double phaseOffset = 0.0;
+                    setMagnitude(calculateMagnitude(&phaseOffset));
+//                    ALOGD("%s() mag = %f, offset = %f, prev = %f",
+//                            __func__, mMagnitude, mPhaseOffset, mPreviousPhaseOffset);
+                    if (mMagnitude > mThreshold) {
+                        if (abs(phaseOffset) < kMaxPhaseError) {
+                            mState = STATE_LOCKED;
+//                            ALOGD("%5d: switch to STATE_LOCKED", mFrameCounter);
+                        }
+                        // Adjust mInputPhase to match measured phase
+                        mInputPhase += phaseOffset;
+                    }
+                    resetAccumulator();
+                }
+                incrementInputPhase();
+                break;
+
+            case STATE_LOCKED: {
+                // Predict next sine value
+                double predicted = sinf(mInputPhase) * mMagnitude;
+                double diff = predicted - sample;
+                double absDiff = fabs(diff);
+                mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
+                if (absDiff > mScaledTolerance) {
+                    result = ERROR_GLITCHES;
+                    onGlitchStart();
+//                    LOGI("diff glitch detected, absDiff = %g", absDiff);
+                } else {
+                    mSumSquareSignal += predicted * predicted;
+                    mSumSquareNoise += diff * diff;
+                    // Track incoming signal and slowly adjust magnitude to account
+                    // for drift in the DRC or AGC.
+                    mSinAccumulator += sample * sinf(mInputPhase);
+                    mCosAccumulator += sample * cosf(mInputPhase);
+                    mFramesAccumulated++;
+                    // Must be a multiple of the period or the calculation will not be accurate.
+                    if (mFramesAccumulated == mSinePeriod) {
+                        const double coefficient = 0.1;
+                        double phaseOffset = 0.0;
+                        double magnitude = calculateMagnitude(&phaseOffset);
+                        // One pole averaging filter.
+                        setMagnitude((mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient));
+
+                        mMeanSquareNoise = mSumSquareNoise * mInverseSinePeriod;
+                        mMeanSquareSignal = mSumSquareSignal * mInverseSinePeriod;
+                        resetAccumulator();
+
+                        if (abs(phaseOffset) > kMaxPhaseError) {
+                            result = ERROR_GLITCHES;
+                            onGlitchStart();
+                            ALOGD("phase glitch detected, phaseOffset = %g", phaseOffset);
+                        } else if (mMagnitude < mThreshold) {
+                            result = ERROR_GLITCHES;
+                            onGlitchStart();
+                            ALOGD("magnitude glitch detected, mMagnitude = %g", mMagnitude);
+                        }
+                    }
+                }
+                incrementInputPhase();
+            } break;
+
+            case STATE_GLITCHING: {
+                // Predict next sine value
+                mGlitchLength++;
+                double predicted = sinf(mInputPhase) * mMagnitude;
+                double diff = predicted - sample;
+                double absDiff = fabs(diff);
+                mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
+                if (absDiff < mScaledTolerance) { // close enough?
+                    // If we get a full sine period of non-glitch samples in a row then consider the glitch over.
+                    // We don't want to just consider a zero crossing the end of a glitch.
+                    if (mNonGlitchCount++ > mSinePeriod) {
+                        onGlitchEnd();
+                    }
+                } else {
+                    mNonGlitchCount = 0;
+                    if (mGlitchLength > (4 * mSinePeriod)) {
+                        relock();
+                    }
+                }
+                incrementInputPhase();
+            } break;
+
+            case NUM_STATES: // not a real state
+                break;
+        }
+
+        mFrameCounter++;
+
+        return result;
+    }
+
+    // advance and wrap phase
+    void incrementInputPhase() {
+        mInputPhase += mPhaseIncrement;
+        if (mInputPhase > M_PI) {
+            mInputPhase -= (2.0 * M_PI);
+        }
+    }
+
+    // advance and wrap phase
+    void incrementOutputPhase() {
+        mOutputPhase += mPhaseIncrement;
+        if (mOutputPhase > M_PI) {
+            mOutputPhase -= (2.0 * M_PI);
+        }
+    }
+
+    /**
+     * @param frameData upon return, contains the reference sine wave
+     * @param channelCount
+     */
+    result_code processOutputFrame(float *frameData, int channelCount) override {
+        float output = 0.0f;
+        // Output sine wave so we can measure it.
+        if (mState != STATE_IDLE) {
+            float sinOut = sinf(mOutputPhase);
+            incrementOutputPhase();
+            output = (sinOut * mOutputAmplitude)
+                     + (mWhiteNoise.nextRandomDouble() * kNoiseAmplitude);
+            // ALOGD("sin(%f) = %f, %f\n", mOutputPhase, sinOut,  mPhaseIncrement);
+        }
+        frameData[0] = output;
+        for (int i = 1; i < channelCount; i++) {
+            frameData[i] = 0.0f;
+        }
+        return RESULT_OK;
+    }
+
    // Enter the GLITCHING state and start counting the glitch's length.
    void onGlitchStart() {
        mGlitchCount++;
//        ALOGD("%5d: STARTED a glitch # %d", mFrameCounter, mGlitchCount);
        mState = STATE_GLITCHING;
        mGlitchLength = 1;
        mNonGlitchCount = 0;
    }
+
    // Leave the GLITCHING state, resume watching for glitches, and restart the sums.
    void onGlitchEnd() {
//        ALOGD("%5d: ENDED a glitch # %d, length = %d", mFrameCounter, mGlitchCount, mGlitchLength);
        mState = STATE_LOCKED;
        resetAccumulator();
    }
+
    // Reset the running sums the sine-wave detector uses to estimate
    // magnitude, phase, and signal/noise power.
    void resetAccumulator() {
        mFramesAccumulated = 0;
        mSinAccumulator = 0.0;
        mCosAccumulator = 0.0;
        mSumSquareSignal = 0.0;
        mSumSquareNoise = 0.0;
    }
+
    // Give up on the current lock and go back to searching for the sine phase.
    void relock() {
//        ALOGD("relock: %d because of a very long %d glitch", mFrameCounter, mGlitchLength);
        mState = STATE_WAITING_FOR_LOCK;
        resetAccumulator();
    }
+
    // Restart the state machine from IDLE and wait through the initial idle period.
    void reset() override {
        LoopbackProcessor::reset();
        mState = STATE_IDLE;
        mDownCounter = IDLE_FRAME_COUNT;
        resetAccumulator();
    }
+
    // Derive the sine-wave parameters from the stream's actual sample rate
    // and clear the per-test counters.
    void prepareToTest() override {
        LoopbackProcessor::prepareToTest();
        mSinePeriod = getSampleRate() / kTargetGlitchFrequency;
        mOutputPhase = 0.0f;
        mInverseSinePeriod = 1.0 / mSinePeriod;
        mPhaseIncrement = 2.0 * M_PI * mInverseSinePeriod;
        mGlitchCount = 0;
        mMaxGlitchDelta = 0.0;
        // NOTE(review): mInputPhase is not reset here; presumably it is
        // re-derived when the analyzer locks onto the loopback signal — confirm.
        for (int i = 0; i < NUM_STATES; i++) {
            mStateFrameCounters[i] = 0;
        }
    }
+
private:

    // These must match the values in GlitchActivity.java
    enum sine_state_t {
        STATE_IDLE,               // beginning
        STATE_IMMUNE,             // ignoring input, waiting for HW to settle
        STATE_WAITING_FOR_SIGNAL, // looking for a loud signal
        STATE_WAITING_FOR_LOCK,   // trying to lock onto the phase of the sine
        STATE_LOCKED,             // locked on the sine wave, looking for glitches
        STATE_GLITCHING,           // locked on the sine wave but glitching
        NUM_STATES
    };

    enum constants {
        // Arbitrary durations, assuming 48000 Hz
        IDLE_FRAME_COUNT = 48 * 100,
        IMMUNE_FRAME_COUNT = 48 * 100,
        PERIODS_NEEDED_FOR_LOCK = 8,
        MIN_SNR_DB = 65
    };

    // 0.00 disables the added noise; a non-zero value mixes dither into the output sine.
    static constexpr float kNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
    // Frequency of the reference sine wave, in Hz.
    static constexpr int kTargetGlitchFrequency = 607;
    // Phase deviations larger than this count as a glitch.
    static constexpr double kMaxPhaseError = M_PI * 0.05;

    float   mTolerance = 0.10; // scaled from 0.0 to 1.0
    double  mThreshold = 0.005;
    int     mSinePeriod = 1; // this will be set before use
    double  mInverseSinePeriod = 1.0;

    // Frames spent in each state, for diagnostics.
    int32_t mStateFrameCounters[NUM_STATES];

    double  mPhaseIncrement = 0.0;
    double  mInputPhase = 0.0;
    double  mOutputPhase = 0.0;
    double  mMagnitude = 0.0;
    int32_t mFramesAccumulated = 0;
    double  mSinAccumulator = 0.0;
    double  mCosAccumulator = 0.0;
    double  mMaxGlitchDelta = 0.0;
    int32_t mGlitchCount = 0;
    int32_t mNonGlitchCount = 0;
    int32_t mGlitchLength = 0;
    // This is used for processing every frame so we cache it here.
    double  mScaledTolerance = 0.0;
    int     mDownCounter = IDLE_FRAME_COUNT;
    int32_t mFrameCounter = 0;
    double  mOutputAmplitude = 0.75;

    // Debug hooks: set mForceGlitchDuration > 0 to inject an artificial glitch.
    int32_t mForceGlitchDuration = 0; // if > 0 then force a glitch for debugging
    int32_t mForceGlitchCounter = 4 * 48000; // count down and trigger at zero

    // measure background noise continuously as a deviation from the expected signal
    double  mSumSquareSignal = 0.0;
    double  mSumSquareNoise = 0.0;
    double  mMeanSquareSignal = 0.0;
    double  mMeanSquareNoise = 0.0;

    PeakDetector  mPeakFollower;

    PseudoRandom  mWhiteNoise;

    sine_state_t  mState = STATE_IDLE;
};
+
+
+#endif //ANALYZER_GLITCH_ANALYZER_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/LatencyAnalyzer.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/LatencyAnalyzer.h
new file mode 100644
index 0000000..59106cb
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/LatencyAnalyzer.h
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tools for measuring latency and for detecting glitches.
+ * These classes are pure math and can be used with any audio system.
+ */
+
+#ifndef ANALYZER_LATENCY_ANALYZER_H
+#define ANALYZER_LATENCY_ANALYZER_H
+
+#include <algorithm>
+#include <assert.h>
+#include <cctype>
+#include <iomanip>
+#include <iostream>
+#include <math.h>
+#include <memory>
+#include <sstream>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <vector>
+
+#include "PeakDetector.h"
+#include "PseudoRandom.h"
+#include "RandomPulseGenerator.h"
+
+
+#define LOOPBACK_RESULT_TAG  "RESULT: "
+
+static constexpr int32_t kDefaultSampleRate = 48000;
+static constexpr int32_t kMillisPerSecond   = 1000;
+static constexpr int32_t kMaxLatencyMillis  = 700;  // arbitrary and generous
+static constexpr double  kMinimumConfidence = 0.2;
+
/**
 * Result of a latency measurement: the estimated round-trip latency in frames
 * and a normalized-correlation confidence (0.0 = none, 1.0 = perfect).
 */
struct LatencyReport {
    int32_t latencyInFrames = 0; // was "= 0.0": an int field should use an integer literal
    double confidence = 0.0;

    void reset() {
        latencyInFrames = 0;
        confidence = 0.0;
    }
};
+
// Calculate a normalized cross correlation.
// Returns 1.0 for identical signals, 0.0 for a silent window:
//   correlation = 2 * sum(a[i]*b[i]) / sum(a[i]^2 + b[i]^2)
static double calculateNormalizedCorrelation(const float *a,
                                             const float *b,
                                             int windowSize) {
    double sumProducts = 0.0;
    double sumSquares = 0.0;

    for (int i = 0; i < windowSize; i++) {
        const float s1 = a[i];
        const float s2 = b[i];
        sumProducts += s1 * s2;
        sumSquares += ((s1 * s1) + (s2 * s2));
    }

    // Guard against dividing by (nearly) zero energy.
    if (sumSquares < 1.0e-9) {
        return 0.0;
    }
    return 2.0 * sumProducts / sumSquares;
}
+
/**
 * @param data samples to analyze (not modified)
 * @param numSamples number of samples; must be positive to get a meaningful result
 * @return root-mean-square of the samples, or 0.0 for an empty/null buffer
 */
static double calculateRootMeanSquare(const float *data, int32_t numSamples) {
    // Guard: dividing by zero below would yield NaN for an empty buffer.
    if (data == nullptr || numSamples <= 0) {
        return 0.0;
    }
    double sum = 0.0;
    for (int32_t i = 0; i < numSamples; i++) {
        const double sample = data[i];
        sum += sample * sample;
    }
    return sqrt(sum / numSamples);
}
+
+/**
+ * Monophonic recording with processing.
+ */
+class AudioRecording
+{
+public:
+
+    void allocate(int maxFrames) {
+        mData = std::make_unique<float[]>(maxFrames);
+        mMaxFrames = maxFrames;
+    }
+
+    // Write SHORT data from the first channel.
+    int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
+        }
+        return numFrames;
+    }
+
+    // Write FLOAT data from the first channel.
+    int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount];
+        }
+        return numFrames;
+    }
+
+    // Write FLOAT data from the first channel.
+    int32_t write(float sample) {
+        // stop at end of buffer
+        if (mFrameCounter < mMaxFrames) {
+            mData[mFrameCounter++] = sample;
+            return 1;
+        }
+        return 0;
+    }
+
+    void clear() {
+        mFrameCounter = 0;
+    }
+    int32_t size() const {
+        return mFrameCounter;
+    }
+
+    bool isFull() const {
+        return mFrameCounter >= mMaxFrames;
+    }
+
+    float *getData() const {
+        return mData.get();
+    }
+
+    void setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    /**
+     * Square the samples so they are all positive and so the peaks are emphasized.
+     */
+    void square() {
+        float *x = mData.get();
+        for (int i = 0; i < mFrameCounter; i++) {
+            x[i] *= x[i];
+        }
+    }
+
+    /**
+     * Amplify a signal so that the peak matches the specified target.
+     *
+     * @param target final max value
+     * @return gain applied to signal
+     */
+    float normalize(float target) {
+        float maxValue = 1.0e-9f;
+        for (int i = 0; i < mFrameCounter; i++) {
+            maxValue = std::max(maxValue, abs(mData[i]));
+        }
+        float gain = target / maxValue;
+        for (int i = 0; i < mFrameCounter; i++) {
+            mData[i] *= gain;
+        }
+        return gain;
+    }
+
+private:
+    std::unique_ptr<float[]> mData;
+    int32_t       mFrameCounter = 0;
+    int32_t       mMaxFrames = 0;
+    int32_t       mSampleRate = kDefaultSampleRate; // common default
+};
+
+static int measureLatencyFromPulse(AudioRecording &recorded,
+                                   AudioRecording &pulse,
+                                   LatencyReport *report) {
+
+    report->latencyInFrames = 0;
+    report->confidence = 0.0;
+
+    int numCorrelations = recorded.size() - pulse.size();
+    if (numCorrelations < 10) {
+        ALOGE("%s() recording too small = %d frames\n", __func__, recorded.size());
+        return -1;
+    }
+    std::unique_ptr<float[]> correlations= std::make_unique<float[]>(numCorrelations);
+
+    // Correlate pulse against the recorded data.
+    for (int i = 0; i < numCorrelations; i++) {
+        float correlation = (float) calculateNormalizedCorrelation(&recorded.getData()[i],
+                                                                   &pulse.getData()[0],
+                                                                   pulse.size());
+        correlations[i] = correlation;
+    }
+
+    // Find highest peak in correlation array.
+    float peakCorrelation = 0.0;
+    int peakIndex = -1;
+    for (int i = 0; i < numCorrelations; i++) {
+        float value = abs(correlations[i]);
+        if (value > peakCorrelation) {
+            peakCorrelation = value;
+            peakIndex = i;
+        }
+    }
+    if (peakIndex < 0) {
+        ALOGE("%s() no signal for correlation\n", __func__);
+        return -2;
+    }
+
+    report->latencyInFrames = peakIndex;
+    report->confidence = peakCorrelation;
+
+    return 0;
+}
+
+// ====================================================================================
+class LoopbackProcessor {
+public:
+    virtual ~LoopbackProcessor() = default;
+
+    enum result_code {
+        RESULT_OK = 0,
+        ERROR_NOISY = -99,
+        ERROR_VOLUME_TOO_LOW,
+        ERROR_VOLUME_TOO_HIGH,
+        ERROR_CONFIDENCE,
+        ERROR_INVALID_STATE,
+        ERROR_GLITCHES,
+        ERROR_NO_LOCK
+    };
+
+    virtual void prepareToTest() {
+        reset();
+    }
+
+    virtual void reset() {
+        mResult = 0;
+        mResetCount++;
+    }
+
+    virtual result_code processInputFrame(float *frameData, int channelCount) = 0;
+    virtual result_code processOutputFrame(float *frameData, int channelCount) = 0;
+
+    void process(float *inputData, int inputChannelCount, int numInputFrames,
+                 float *outputData, int outputChannelCount, int numOutputFrames) {
+        int numBoth = std::min(numInputFrames, numOutputFrames);
+        // Process one frame at a time.
+        for (int i = 0; i < numBoth; i++) {
+            processInputFrame(inputData, inputChannelCount);
+            inputData += inputChannelCount;
+            processOutputFrame(outputData, outputChannelCount);
+            outputData += outputChannelCount;
+        }
+        // If there is more input than output.
+        for (int i = numBoth; i < numInputFrames; i++) {
+            processInputFrame(inputData, inputChannelCount);
+            inputData += inputChannelCount;
+        }
+        // If there is more output than input.
+        for (int i = numBoth; i < numOutputFrames; i++) {
+            processOutputFrame(outputData, outputChannelCount);
+            outputData += outputChannelCount;
+        }
+    }
+
+    virtual std::string analyze() = 0;
+
+    virtual void printStatus() {};
+
+    int32_t getResult() {
+        return mResult;
+    }
+
+    void setResult(int32_t result) {
+        mResult = result;
+    }
+
+    virtual bool isDone() {
+        return false;
+    }
+
+    virtual int save(const char *fileName) {
+        (void) fileName;
+        return -1;
+    }
+
+    virtual int load(const char *fileName) {
+        (void) fileName;
+        return -1;
+    }
+
+    virtual void setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    int32_t getResetCount() const {
+        return mResetCount;
+    }
+
+    /** Called when not enough input frames could be read after synchronization.
+     */
+    virtual void onInsufficientRead() {
+        reset();
+    }
+
+protected:
+    int32_t   mResetCount = 0;
+
+private:
+    int32_t mSampleRate = kDefaultSampleRate;
+    int32_t mResult = 0;
+};
+
/**
 * Abstract interface for latency analyzers.
 * Declares the queries used to poll measurement progress and results.
 */
class LatencyAnalyzer : public LoopbackProcessor {
public:

    LatencyAnalyzer() : LoopbackProcessor() {}
    virtual ~LatencyAnalyzer() = default;

    // @return number of frames processed so far, e.g. for a progress bar
    virtual int32_t getProgress() const = 0;

    // @return current state of the measurement state machine
    virtual int getState() = 0;

    // @return latency in frames
    virtual int32_t getMeasuredLatency() = 0;

    // @return correlation confidence, from 0.0 (none) to 1.0 (perfect)
    virtual double getMeasuredConfidence() = 0;

    // @return RMS of the background noise measured before the pulse was played
    virtual double getBackgroundRMS() = 0;

    // @return RMS of the recorded pulse signal
    virtual double getSignalRMS() = 0;

};
+
// ====================================================================================
/**
 * Measure latency given a loopback stream data.
 * Use an encoded bit train as the sound source because it
 * has an unambiguous correlation value.
 * Uses a state machine to cycle through various stages.
 *
 * State flow: MEASURE_BACKGROUND -> IN_PULSE -> GOT_DATA -> DONE.
 */
class PulseLatencyAnalyzer : public LatencyAnalyzer {
public:

    // Size the recording so it can hold the whole pulse plus the maximum
    // expected latency, then pre-generate the random pulse.
    PulseLatencyAnalyzer() : LatencyAnalyzer() {
        int32_t maxLatencyFrames = getSampleRate() * kMaxLatencyMillis / kMillisPerSecond;
        int32_t numPulseBits = getSampleRate() * kPulseLengthMillis
                / (kFramesPerEncodedBit * kMillisPerSecond);
        int32_t  pulseLength = numPulseBits * kFramesPerEncodedBit;
        mFramesToRecord = pulseLength + maxLatencyFrames;
        mAudioRecording.allocate(mFramesToRecord);
        mAudioRecording.setSampleRate(getSampleRate());
        generateRandomPulse(pulseLength);
    }

    // Fill mPulse with an encoded random bit train of the given length.
    void generateRandomPulse(int32_t pulseLength) {
        mPulse.allocate(pulseLength);
        RandomPulseGenerator pulser(kFramesPerEncodedBit);
        for (int i = 0; i < pulseLength; i++) {
            mPulse.write(pulser.nextFloat());
        }
    }

    int getState() override {
        return mState;
    }

    void setSampleRate(int32_t sampleRate) override {
        LoopbackProcessor::setSampleRate(sampleRate);
        mAudioRecording.setSampleRate(sampleRate);
    }

    void reset() override {
        LoopbackProcessor::reset();
        // Measure background noise for half a second before playing the pulse.
        mDownCounter = getSampleRate() / 2;
        mLoopCounter = 0;

        mPulseCursor = 0;
        mBackgroundSumSquare = 0.0f;
        mBackgroundSumCount = 0;
        mBackgroundRMS = 0.0f;
        mSignalRMS = 0.0f;

        mState = STATE_MEASURE_BACKGROUND;
        mAudioRecording.clear();
        mLatencyReport.reset();
    }

    bool hasEnoughData() {
        return mAudioRecording.isFull();
    }

    bool isDone() override {
        return mState == STATE_DONE;
    }

    int32_t getProgress() const override {
        return mAudioRecording.size();
    }

    // Run the correlation analysis on the captured recording and build
    // the text report that is parsed by the Java side.
    std::string analyze() override {
        std::stringstream report;
        report << "PulseLatencyAnalyzer ---------------\n";
        report << LOOPBACK_RESULT_TAG "test.state             = "
                << std::setw(8) << mState << "\n";
        report << LOOPBACK_RESULT_TAG "test.state.name        = "
                << convertStateToText(mState) << "\n";
        report << LOOPBACK_RESULT_TAG "background.rms         = "
                << std::setw(8) << mBackgroundRMS << "\n";

        int32_t newResult = RESULT_OK;
        if (mState != STATE_GOT_DATA) {
            report << "WARNING - Bad state. Check volume on device.\n";
            // setResult(ERROR_INVALID_STATE);
        } else {
            // Normalize so the correlation search is independent of record level.
            float gain = mAudioRecording.normalize(1.0f);
            // NOTE(review): the return code of measureLatencyFromPulse() is
            // ignored; on failure the report stays zeroed and is caught by the
            // confidence check below — confirm that is the intended error path.
            measureLatencyFromPulse(mAudioRecording,
                                    mPulse,
                                    &mLatencyReport);

            if (mLatencyReport.confidence < kMinimumConfidence) {
                report << "   ERROR - confidence too low!";
                newResult = ERROR_CONFIDENCE;
            } else {
                // Divide by gain to report the RMS at the original record level.
                mSignalRMS = calculateRootMeanSquare(
                        &mAudioRecording.getData()[mLatencyReport.latencyInFrames], mPulse.size())
                                / gain;
            }
            double latencyMillis = kMillisPerSecond * (double) mLatencyReport.latencyInFrames
                                   / getSampleRate();
            report << LOOPBACK_RESULT_TAG "latency.frames         = " << std::setw(8)
                   << mLatencyReport.latencyInFrames << "\n";
            report << LOOPBACK_RESULT_TAG "latency.msec           = " << std::setw(8)
                   << latencyMillis << "\n";
            report << LOOPBACK_RESULT_TAG "latency.confidence     = " << std::setw(8)
                   << mLatencyReport.confidence << "\n";
        }
        mState = STATE_DONE;
        if (getResult() == RESULT_OK) {
            setResult(newResult);
        }

        return report.str();
    }

    int32_t getMeasuredLatency() override {
        return mLatencyReport.latencyInFrames;
    }

    double getMeasuredConfidence() override {
        return mLatencyReport.confidence;
    }

    double getBackgroundRMS() override {
        return mBackgroundRMS;
    }

    double getSignalRMS() override {
        return mSignalRMS;
    }

    bool isRecordingComplete() {
        return mState == STATE_GOT_DATA;
    }

    void printStatus() override {
        ALOGD("latency: st = %d = %s", mState, convertStateToText(mState));
    }

    // Consume one frame of input (channel 0 only) and advance the state machine.
    result_code processInputFrame(float *frameData, int channelCount) override {
        echo_state nextState = mState;
        mLoopCounter++;

        switch (mState) {
            case STATE_MEASURE_BACKGROUND:
                // Measure background RMS on channel 0
                mBackgroundSumSquare += frameData[0] * frameData[0];
                mBackgroundSumCount++;
                mDownCounter--;
                if (mDownCounter <= 0) {
                    mBackgroundRMS = sqrtf(mBackgroundSumSquare / mBackgroundSumCount);
                    nextState = STATE_IN_PULSE;
                    mPulseCursor = 0;
                }
                break;

            case STATE_IN_PULSE:
                // Record input until the mAudioRecording is full.
                mAudioRecording.write(frameData, channelCount, 1);
                if (hasEnoughData()) {
                    nextState = STATE_GOT_DATA;
                }
                break;

            case STATE_GOT_DATA:
            case STATE_DONE:
            default:
                break;
        }

        mState = nextState;
        return RESULT_OK;
    }

    // Emit the pulse (same sample on every channel) while in IN_PULSE,
    // silence otherwise.
    result_code processOutputFrame(float *frameData, int channelCount) override {
        switch (mState) {
            case STATE_IN_PULSE:
                if (mPulseCursor < mPulse.size()) {
                    float pulseSample = mPulse.getData()[mPulseCursor++];
                    for (int i = 0; i < channelCount; i++) {
                        frameData[i] = pulseSample;
                    }
                } else {
                    for (int i = 0; i < channelCount; i++) {
                        frameData[i] = 0;
                    }
                }
                break;

            case STATE_MEASURE_BACKGROUND:
            case STATE_GOT_DATA:
            case STATE_DONE:
            default:
                for (int i = 0; i < channelCount; i++) {
                    frameData[i] = 0.0f; // silence
                }
                break;
        }

        return RESULT_OK;
    }

private:

    enum echo_state {
        STATE_MEASURE_BACKGROUND,
        STATE_IN_PULSE,
        STATE_GOT_DATA, // must match RoundTripLatencyActivity.java
        STATE_DONE,
    };

    const char *convertStateToText(echo_state state) {
        switch (state) {
            case STATE_MEASURE_BACKGROUND:
                return "INIT";
            case STATE_IN_PULSE:
                return "PULSE";
            case STATE_GOT_DATA:
                return "GOT_DATA";
            case STATE_DONE:
                return "DONE";
        }
        return "UNKNOWN";
    }

    int32_t         mDownCounter = 500;   // overwritten by reset()
    int32_t         mLoopCounter = 0;
    echo_state      mState = STATE_MEASURE_BACKGROUND;

    static constexpr int32_t kFramesPerEncodedBit = 8; // multiple of 2
    static constexpr int32_t kPulseLengthMillis = 500;

    AudioRecording     mPulse;
    int32_t            mPulseCursor = 0;

    double             mBackgroundSumSquare = 0.0;
    int32_t            mBackgroundSumCount = 0;
    double             mBackgroundRMS = 0.0;
    double             mSignalRMS = 0.0;
    int32_t            mFramesToRecord = 0;

    AudioRecording     mAudioRecording; // contains only the input after starting the pulse
    LatencyReport      mLatencyReport;
};
+
+#endif // ANALYZER_LATENCY_ANALYZER_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/ManchesterEncoder.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/ManchesterEncoder.h
new file mode 100644
index 0000000..3f7eebb
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/ManchesterEncoder.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_MANCHESTER_ENCODER_H
+#define ANALYZER_MANCHESTER_ENCODER_H
+
+#include <cstdint>
+
+/**
+ * Encode bytes using Manchester Coding scheme.
+ *
+ * Manchester Code is self clocking.
+ * There is a transition in the middle of every bit.
+ * Zero is high then low.
+ * One is low then high.
+ *
+ * This avoids having long DC sections that would droop when
+ * passed though analog circuits with AC coupling.
+ *
+ * IEEE 802.3 compatible.
+ */
+
class ManchesterEncoder {
public:
    /**
     * @param samplesPerPulse number of output samples used to encode each bit
     */
    ManchesterEncoder(int samplesPerPulse)
            : mSamplesPerPulse(samplesPerPulse)
            , mSamplesPerPulseHalf(samplesPerPulse / 2)
            , mCursor(samplesPerPulse) {
    }

    virtual ~ManchesterEncoder() = default;

    /**
     * This will be called when the next byte is needed.
     * @return the byte to encode, MSB first
     */
    virtual uint8_t onNextByte() = 0;

    /**
     * Generate the next floating point sample.
     * A one bit is low-then-high; a zero bit is high-then-low.
     * @return -1.0f or +1.0f
     */
    virtual float nextFloat() {
        advanceSample();
        const bool firstHalf = (mCursor < mSamplesPerPulseHalf);
        // "one" starts low, "zero" starts high.
        return (mCurrentBit == firstHalf) ? -1.0f : 1.0f;
    }

protected:
    /**
     * This will be called when a new bit is ready to be encoded.
     * It can be used to prepare the encoded samples.
     * @param current the bit that is about to be emitted
     */
    virtual void onNextBit(bool /* current */) {};

    void advanceSample() {
        mCursor++;
        if (mCursor < mSamplesPerPulse) {
            return; // still inside the current bit
        }
        // Start a new bit; fetch a fresh byte when the old one is exhausted.
        mCursor = 0;
        if (mBitsLeft == 0) {
            mCurrentByte = onNextByte();
            mBitsLeft = 8;
        }
        --mBitsLeft;
        mCurrentBit = ((mCurrentByte >> mBitsLeft) & 1) != 0;
        onNextBit(mCurrentBit);
    }

    bool getCurrentBit() {
        return mCurrentBit;
    }

    const int mSamplesPerPulse;
    const int mSamplesPerPulseHalf;
    int       mCursor;
    int       mBitsLeft = 0;
    uint8_t   mCurrentByte = 0;
    bool      mCurrentBit = false;
};
+#endif //ANALYZER_MANCHESTER_ENCODER_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/PeakDetector.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/PeakDetector.h
new file mode 100644
index 0000000..e407eac
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/PeakDetector.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_PEAK_DETECTOR_H
+#define ANALYZER_PEAK_DETECTOR_H
+
+#include <math.h>
+
/**
 * Measure a peak envelope by rising with the peaks,
 * and decaying exponentially after each peak.
 * The absolute value of the input signal is used.
 */
class PeakDetector {
public:

    void reset() {
        mLevel = 0.0;
    }

    /**
     * Feed one sample into the follower.
     * @param input next signal sample (sign is ignored)
     * @return updated envelope level
     */
    double process(double input) {
        mLevel *= mDecay; // exponential decay
        input = fabs(input);
        // never fall below the input signal
        if (input > mLevel) {
            mLevel = input;
        }
        return mLevel;
    }

    double getLevel() const {
        return mLevel;
    }

    double getDecay() const {
        return mDecay;
    }

    /**
     * Multiply the level by this amount on every iteration.
     * This provides an exponential decay curve.
     * A value just under 1.0 is best, for example, 0.99;
     * @param decay scale level for each input
     */
    void setDecay(double decay) {
        mDecay = decay;
    }

private:
    // Use a double literal: the old "0.99f" stored the float rounding of 0.99,
    // not the intended double value.
    static constexpr double kDefaultDecay = 0.99;

    double mLevel = 0.0;
    double mDecay = kDefaultDecay;
};
+#endif //ANALYZER_PEAK_DETECTOR_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/PseudoRandom.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/PseudoRandom.h
new file mode 100644
index 0000000..d8f5894
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/PseudoRandom.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANALYZER_PSEUDORANDOM_H
+#define ANALYZER_PSEUDORANDOM_H
+
+#include <cctype>
+
class PseudoRandom {
public:
    /** @param seed initial generator state; the default yields a repeatable sequence */
    PseudoRandom(int64_t seed = 99887766)
            : mSeed(seed) {}

    /**
     * Returns the next random double from -1.0 to 1.0
     *
     * @return value from -1.0 to 1.0
     */
    double nextRandomDouble() {
        // 0.5 / 2^30 == 2^-31, which maps the full int32 range onto [-1, 1).
        return nextRandomInteger() * (0.5 / (((int32_t)1) << 30));
    }

    /**
     * Calculate the next random 32 bit number using the linear-congruential
     * method (MMIX constants from Donald Knuth); constant-time, so it is
     * safe inside real-time audio callbacks.
     */
    int32_t nextRandomInteger() {
#if __has_builtin(__builtin_mul_overflow) && __has_builtin(__builtin_add_overflow)
        // The overflow builtins make the intentional signed wraparound well defined.
        int64_t product;
        __builtin_mul_overflow(mSeed, (int64_t)6364136223846793005, &product);
        __builtin_add_overflow(product, (int64_t)1442695040888963407, &mSeed);
#else
        mSeed = (mSeed * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
#endif
        // The higher bits have a longer sequence.
        return (int32_t) (mSeed >> 32);
    }

private:
    int64_t mSeed;
};
+
+#endif //ANALYZER_PSEUDORANDOM_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/RandomPulseGenerator.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/RandomPulseGenerator.h
new file mode 100644
index 0000000..b057d09
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/RandomPulseGenerator.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_RANDOM_PULSE_GENERATOR_H
+#define ANALYZER_RANDOM_PULSE_GENERATOR_H
+
+#include <stdlib.h>
+#include "RoundedManchesterEncoder.h"
+
+/**
+ * Encode random ones and zeros using Manchester Code per IEEE 802.3.
+ */
+class RandomPulseGenerator : public RoundedManchesterEncoder {
+public:
+    RandomPulseGenerator(int samplesPerPulse)
+    : RoundedManchesterEncoder(samplesPerPulse) {
+    }
+
+    virtual ~RandomPulseGenerator() = default;
+
+    /**
+     * This will be called when the next byte is needed.
+     * @return random byte
+     */
+    uint8_t onNextByte() override {
+        return static_cast<uint8_t>(rand());
+    }
+};
+
+#endif //ANALYZER_RANDOM_PULSE_GENERATOR_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/analyzer/RoundedManchesterEncoder.h b/apps/CtsVerifier/jni/audio_loopback/analyzer/RoundedManchesterEncoder.h
new file mode 100644
index 0000000..76f57e7
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/analyzer/RoundedManchesterEncoder.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
+#define ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
+
+#include <math.h>
+#include <memory>
+#include <stdlib.h>
+#include "ManchesterEncoder.h"
+
+/**
+ * Encode bytes using Manchester Code.
+ * Round the edges using a half cosine to reduce ringing caused by a hard edge.
+ */
+
+class RoundedManchesterEncoder : public ManchesterEncoder {
+public:
+    RoundedManchesterEncoder(int samplesPerPulse)
+            : ManchesterEncoder(samplesPerPulse) {
+        int rampSize = samplesPerPulse / 4;
+        mZeroAfterZero = std::make_unique<float[]>(samplesPerPulse);
+        mZeroAfterOne = std::make_unique<float[]>(samplesPerPulse);
+
+        int sampleIndex = 0;
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            float phase = (rampIndex + 1) * M_PI / rampSize;
+            float sample = -cosf(phase);
+            mZeroAfterZero[sampleIndex] = sample;
+            mZeroAfterOne[sampleIndex] = 1.0f;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            mZeroAfterZero[sampleIndex] = 1.0f;
+            mZeroAfterOne[sampleIndex] = 1.0f;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            float phase = (rampIndex + 1) * M_PI / rampSize;
+            float sample = cosf(phase);
+            mZeroAfterZero[sampleIndex] = sample;
+            mZeroAfterOne[sampleIndex] = sample;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            mZeroAfterZero[sampleIndex] = -1.0f;
+            mZeroAfterOne[sampleIndex] = -1.0f;
+            sampleIndex++;
+        }
+    }
+
+    void onNextBit(bool current) override {
+        // Do we need to use the rounded edge?
+        mCurrentSamples = (current ^ mPreviousBit)
+                          ? mZeroAfterOne.get()
+                          : mZeroAfterZero.get();
+        mPreviousBit = current;
+    }
+
+    float nextFloat() override {
+        advanceSample();
+        float output = mCurrentSamples[mCursor];
+        if (getCurrentBit()) output = -output;
+        return output;
+    }
+
+private:
+
+    bool mPreviousBit = false;
+    float *mCurrentSamples = nullptr;
+    std::unique_ptr<float[]> mZeroAfterZero;
+    std::unique_ptr<float[]> mZeroAfterOne;
+};
+
+#endif //ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.c b/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.c
deleted file mode 100644
index db2b3fc..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "atomic.h"
-
-#include <stdatomic.h>
-
-int32_t android_atomic_acquire_load(volatile const int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_load_explicit(a, memory_order_acquire);
-}
-
-void android_atomic_release_store(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    atomic_store_explicit(a, value, memory_order_release);
-}
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.h b/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.h
deleted file mode 100644
index 535c926..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/atomic.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_ATOMIC_H
-#define ANDROID_AUDIO_ATOMIC_H
-
-#include <stdlib.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int32_t android_atomic_acquire_load(volatile const int32_t* addr);
-void android_atomic_release_store(int32_t value, volatile int32_t* addr);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // ANDROID_AUDIO_ATOMIC_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.c b/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.c
deleted file mode 100644
index ea9a8d1..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "audio_utils_fifo"
-
-#include <stdlib.h>
-#include <string.h>
-#include "fifo.h"
-#include "roundup.h"
-#include "atomic.h"
-//#include <cutils/log.h>
-#define ALOG_ASSERT(exp)
-
-void audio_utils_fifo_init(struct audio_utils_fifo *fifo, size_t frameCount, size_t frameSize,
-        void *buffer)
-{
-    // We would need a 64-bit roundup to support larger frameCount.
-    ALOG_ASSERT(fifo != NULL && frameCount > 0 && frameSize > 0 && buffer != NULL);
-    fifo->mFrameCount = frameCount;
-    fifo->mFrameCountP2 = roundup(frameCount);
-    fifo->mFudgeFactor = fifo->mFrameCountP2 - fifo->mFrameCount;
-    fifo->mFrameSize = frameSize;
-    fifo->mBuffer = buffer;
-    fifo->mFront = 0;
-    fifo->mRear = 0;
-}
-
-void audio_utils_fifo_deinit(struct audio_utils_fifo *fifo __unused)
-{
-}
-
-// Return a new index as the sum of an old index (either mFront or mRear) and a specified increment.
-static inline int32_t audio_utils_fifo_sum(struct audio_utils_fifo *fifo, int32_t index,
-        uint32_t increment)
-{
-    if (fifo->mFudgeFactor) {
-        uint32_t mask = fifo->mFrameCountP2 - 1;
-        ALOG_ASSERT((index & mask) < fifo->mFrameCount);
-        ALOG_ASSERT(/*0 <= increment &&*/ increment <= fifo->mFrameCountP2);
-        if ((index & mask) + increment >= fifo->mFrameCount) {
-            increment += fifo->mFudgeFactor;
-        }
-        index += increment;
-        ALOG_ASSERT((index & mask) < fifo->mFrameCount);
-        return index;
-    } else {
-        return index + increment;
-    }
-}
-
-// Return the difference between two indices: rear - front, where 0 <= difference <= mFrameCount.
-static inline size_t audio_utils_fifo_diff(struct audio_utils_fifo *fifo, int32_t rear,
-        int32_t front)
-{
-    int32_t diff = rear - front;
-    if (fifo->mFudgeFactor) {
-        uint32_t mask = ~(fifo->mFrameCountP2 - 1);
-        int32_t genDiff = (rear & mask) - (front & mask);
-        if (genDiff != 0) {
-            ALOG_ASSERT(genDiff == (int32_t) fifo->mFrameCountP2);
-            diff -= fifo->mFudgeFactor;
-        }
-    }
-    // FIFO should not be overfull
-    ALOG_ASSERT(0 <= diff && diff <= (int32_t) fifo->mFrameCount);
-    return (size_t) diff;
-}
-
-ssize_t audio_utils_fifo_write(struct audio_utils_fifo *fifo, const void *buffer, size_t count)
-{
-    int32_t front = android_atomic_acquire_load(&fifo->mFront);
-    int32_t rear = fifo->mRear;
-    size_t availToWrite = fifo->mFrameCount - audio_utils_fifo_diff(fifo, rear, front);
-    if (availToWrite > count) {
-        availToWrite = count;
-    }
-    rear &= fifo->mFrameCountP2 - 1;
-    size_t part1 = fifo->mFrameCount - rear;
-    if (part1 > availToWrite) {
-        part1 = availToWrite;
-    }
-    if (part1 > 0) {
-        memcpy((char *) fifo->mBuffer + (rear * fifo->mFrameSize), buffer,
-                part1 * fifo->mFrameSize);
-        size_t part2 = availToWrite - part1;
-        if (part2 > 0) {
-            memcpy(fifo->mBuffer, (char *) buffer + (part1 * fifo->mFrameSize),
-                    part2 * fifo->mFrameSize);
-        }
-        android_atomic_release_store(audio_utils_fifo_sum(fifo, fifo->mRear, availToWrite),
-                &fifo->mRear);
-    }
-    return availToWrite;
-}
-
-ssize_t audio_utils_fifo_read(struct audio_utils_fifo *fifo, void *buffer, size_t count)
-{
-    int32_t rear = android_atomic_acquire_load(&fifo->mRear);
-    int32_t front = fifo->mFront;
-    size_t availToRead = audio_utils_fifo_diff(fifo, rear, front);
-    if (availToRead > count) {
-        availToRead = count;
-    }
-    front &= fifo->mFrameCountP2 - 1;
-    size_t part1 = fifo->mFrameCount - front;
-    if (part1 > availToRead) {
-        part1 = availToRead;
-    }
-    if (part1 > 0) {
-        memcpy(buffer, (char *) fifo->mBuffer + (front * fifo->mFrameSize),
-                part1 * fifo->mFrameSize);
-        size_t part2 = availToRead - part1;
-        if (part2 > 0) {
-            memcpy((char *) buffer + (part1 * fifo->mFrameSize), fifo->mBuffer,
-                    part2 * fifo->mFrameSize);
-        }
-        android_atomic_release_store(audio_utils_fifo_sum(fifo, fifo->mFront, availToRead),
-                &fifo->mFront);
-    }
-    return availToRead;
-}
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.h b/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.h
deleted file mode 100644
index ba4c5c6..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/fifo.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_FIFO_H
-#define ANDROID_AUDIO_FIFO_H
-
-#include <stdlib.h>
-
-// FIXME use atomic_int_least32_t and new atomic operations instead of legacy Android ones
-// #include <stdatomic.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Single writer, single reader non-blocking FIFO.
-// Writer and reader must be in same process.
-
-// No user-serviceable parts within.
-struct audio_utils_fifo {
-    // These fields are const after initialization
-    size_t     mFrameCount;   // max number of significant frames to be stored in the FIFO > 0
-    size_t     mFrameCountP2; // roundup(mFrameCount)
-    size_t     mFudgeFactor;  // mFrameCountP2 - mFrameCount, the number of "wasted" frames after
-                              // the end of mBuffer.  Only the indices are wasted, not any memory.
-    size_t     mFrameSize;    // size of each frame in bytes
-    void      *mBuffer;       // pointer to caller-allocated buffer of size mFrameCount frames
-
-    volatile int32_t mFront; // frame index of first frame slot available to read, or read index
-    volatile int32_t mRear;  // frame index of next frame slot available to write, or write index
-};
-
-// Initialize a FIFO object.
-// Input parameters:
-//  fifo        Pointer to the FIFO object.
-//  frameCount  Max number of significant frames to be stored in the FIFO > 0.
-//              If writes and reads always use the same count, and that count is a divisor of
-//              frameCount, then the writes and reads will never do a partial transfer.
-//  frameSize   Size of each frame in bytes.
-//  buffer      Pointer to a caller-allocated buffer of frameCount frames.
-void audio_utils_fifo_init(struct audio_utils_fifo *fifo, size_t frameCount, size_t frameSize,
-        void *buffer);
-
-// De-initialize a FIFO object.
-// Input parameters:
-//  fifo        Pointer to the FIFO object.
-void audio_utils_fifo_deinit(struct audio_utils_fifo *fifo);
-
-// Write to FIFO.
-// Input parameters:
-//  fifo        Pointer to the FIFO object.
-//  buffer      Pointer to source buffer containing 'count' frames of data.
-// Returns actual number of frames written <= count.
-// The actual transfer count may be zero if the FIFO is full,
-// or partial if the FIFO was almost full.
-// A negative return value indicates an error.  Currently there are no errors defined.
-ssize_t audio_utils_fifo_write(struct audio_utils_fifo *fifo, const void *buffer, size_t count);
-
-// Read from FIFO.
-// Input parameters:
-//  fifo        Pointer to the FIFO object.
-//  buffer      Pointer to destination buffer to be filled with up to 'count' frames of data.
-// Returns actual number of frames read <= count.
-// The actual transfer count may be zero if the FIFO is empty,
-// or partial if the FIFO was almost empty.
-// A negative return value indicates an error.  Currently there are no errors defined.
-ssize_t audio_utils_fifo_read(struct audio_utils_fifo *fifo, void *buffer, size_t count);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // !ANDROID_AUDIO_FIFO_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.c b/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.c
deleted file mode 100644
index 4f9af6a..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "roundup.h"
-
-unsigned roundup(unsigned v)
-{
-    // __builtin_clz is undefined for zero input
-    if (v == 0) {
-        v = 1;
-    }
-    int lz = __builtin_clz((int) v);
-    unsigned rounded = ((unsigned) 0x80000000) >> lz;
-    // 0x800000001 and higher are actually rounded _down_ to prevent overflow
-    if (v > rounded && lz > 0) {
-        rounded <<= 1;
-    }
-    return rounded;
-}
diff --git a/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.h b/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.h
deleted file mode 100644
index ad34289..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/audio_utils/roundup.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_ROUNDUP_H
-#define ANDROID_AUDIO_ROUNDUP_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Round up to the next highest power of 2
-unsigned roundup(unsigned v);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // ANDROID_AUDIO_ROUNDUP_H
diff --git a/apps/CtsVerifier/jni/audio_loopback/jni-bridge.cpp b/apps/CtsVerifier/jni/audio_loopback/jni-bridge.cpp
new file mode 100644
index 0000000..a851cbe
--- /dev/null
+++ b/apps/CtsVerifier/jni/audio_loopback/jni-bridge.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <cstring>
+#include <jni.h>
+#include <stdint.h>
+
+#include "NativeAudioAnalyzer.h"
+
+extern "C" {
+
+JNIEXPORT jlong JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_openAudio
+  (JNIEnv * /*env */, jobject /* obj */,
+          jint /* micSource */) {
+    // It is OK to use a raw pointer here because the pointer will be passed back
+    // to Java and only used from one thread.
+    // Java then deletes it from that same thread by calling _closeAudio() below.
+    NativeAudioAnalyzer * analyzer = new NativeAudioAnalyzer();
+    aaudio_result_t result = analyzer->openAudio();
+    if (result != AAUDIO_OK) {
+        delete analyzer;
+        analyzer = nullptr;
+    }
+    return (jlong) analyzer;
+}
+
+JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_startAudio
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    int result = AAUDIO_ERROR_NULL;
+    if (analyzer != nullptr) {
+        result = analyzer->startAudio();
+    }
+    return result;
+}
+
+JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_stopAudio
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return analyzer->stopAudio();
+    }
+    return AAUDIO_ERROR_NULL;
+}
+
+JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_closeAudio
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    int result = AAUDIO_ERROR_NULL;
+    if (analyzer != nullptr) {
+        result = analyzer->closeAudio();
+        delete analyzer;
+    }
+    return result;
+}
+
+JNIEXPORT jboolean JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_isRecordingComplete
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return analyzer->isRecordingComplete();
+    }
+    return false;
+}
+
+JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_getError
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return (jint) analyzer->getError();
+    }
+    return (jint) AAUDIO_ERROR_NULL;
+}
+
+JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_analyze
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return analyzer->analyze();
+    }
+    return AAUDIO_ERROR_NULL;
+}
+
+JNIEXPORT jdouble JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_getLatencyMillis
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return analyzer->getLatencyMillis();
+    }
+    return -1.0;
+}
+
+JNIEXPORT jdouble JNICALL Java_com_android_cts_verifier_audio_NativeAnalyzerThread_getConfidence
+  (JNIEnv *env __unused, jobject obj __unused, jlong pAnalyzer) {
+    NativeAudioAnalyzer * analyzer = (NativeAudioAnalyzer *) pAnalyzer;
+    if (analyzer != nullptr) {
+        return analyzer->getConfidence();
+    }
+    return 0.0;
+}
+
+}
diff --git a/apps/CtsVerifier/jni/audio_loopback/jni_sles.c b/apps/CtsVerifier/jni/audio_loopback/jni_sles.c
deleted file mode 100644
index e8a837e..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/jni_sles.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <android/log.h>
-#include "sles.h"
-#include "jni_sles.h"
-#include <stdio.h>
-#include <stddef.h>
-
-/////
-JNIEXPORT jlong JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesInit
-  (JNIEnv *env __unused, jobject obj __unused, jint samplingRate, jint frameCount,
-   jint micSource, jint numFramesToIgnore) {
-
-    sles_data * pSles = NULL;
-
-    if (slesInit(&pSles, samplingRate, frameCount, micSource, numFramesToIgnore) != SLES_FAIL) {
-
-        return (long)pSles;
-    }
-    // FIXME This should be stored as a (long) field in the object,
-    //       so that incorrect Java code could not synthesize a bad sles pointer.
-    return 0;
-}
-
-JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesProcessNext
-  (JNIEnv *env __unused, jobject obj __unused, jlong sles, jdoubleArray samplesArray,
-          jlong offset) {
-    sles_data * pSles= (sles_data*) ((long)sles);
-
-    long maxSamples = (*env)->GetArrayLength(env, samplesArray);
-    double *pSamples = (*env)->GetDoubleArrayElements(env, samplesArray,0);
-
-    long availableSamples = maxSamples-offset;
-    double *pCurrentSample = pSamples+offset;
-
-    SLES_PRINTF("jni slesProcessNext pSles:%p, currentSample %p, availableSamples %ld ", pSles,
-            pCurrentSample, availableSamples);
-
-    int samplesRead = slesProcessNext(pSles, pCurrentSample, availableSamples);
-
-    return samplesRead;
-}
-
-JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesDestroy
-  (JNIEnv *env __unused, jobject obj __unused, jlong sles) {
-    sles_data * pSles= (sles_data*) ((long) sles);
-
-    int status = slesDestroy(&pSles);
-
-    return status;
-}
diff --git a/apps/CtsVerifier/jni/audio_loopback/jni_sles.h b/apps/CtsVerifier/jni/audio_loopback/jni_sles.h
deleted file mode 100644
index d7aa625..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/jni_sles.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <jni.h>
-
-#ifndef _Included_org_drrickorang_loopback_jni
-#define _Included_org_drrickorang_loopback_jni
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-////////////////////////
-JNIEXPORT jlong JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesInit
-  (JNIEnv *, jobject, jint, jint, jint, jint );
-
-JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesProcessNext
-  (JNIEnv *, jobject , jlong, jdoubleArray, jlong );
-
-JNIEXPORT jint JNICALL Java_com_android_cts_verifier_audio_NativeAudioThread_slesDestroy
-  (JNIEnv *, jobject , jlong );
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif //_Included_org_drrickorang_loopback_jni
diff --git a/apps/CtsVerifier/jni/audio_loopback/sles.cpp b/apps/CtsVerifier/jni/audio_loopback/sles.cpp
deleted file mode 100644
index 586c60f..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/sles.cpp
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-////////////////////////////////////////////
-/// Actual sles functions.
-
-
-// Test program to record from default audio input and playback to default audio output.
-// It will generate feedback (Larsen effect) if played through on-device speakers,
-// or acts as a delay if played through headset.
-
-#include "sles.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <stddef.h>
-
-#include <assert.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-int slesInit(sles_data ** ppSles, int samplingRate, int frameCount, int micSource,
-             int numFramesToIgnore) {
-    int status = SLES_FAIL;
-    if (ppSles != NULL) {
-        sles_data * pSles = (sles_data*) calloc(1, sizeof (sles_data));
-
-        SLES_PRINTF("malloc %zu bytes at %p", sizeof(sles_data), pSles);
-        *ppSles = pSles;
-        if (pSles != NULL)
-        {
-            SLES_PRINTF("creating server. Sampling rate =%d, frame count = %d",samplingRate,
-                    frameCount);
-            status = slesCreateServer(pSles, samplingRate, frameCount, micSource,
-                                      numFramesToIgnore);
-            SLES_PRINTF("slesCreateServer =%d", status);
-        }
-    }
-    return status;
-}
-int slesDestroy(sles_data ** ppSles) {
-    int status = SLES_FAIL;
-    if (ppSles != NULL) {
-        slesDestroyServer(*ppSles);
-
-        if (*ppSles != NULL)
-        {
-            free(*ppSles);
-            *ppSles = 0;
-        }
-        status = SLES_SUCCESS;
-    }
-    return status;
-}
-
-#define ASSERT_EQ(x, y) do { if ((x) == (y)) ; else { fprintf(stderr, "0x%x != 0x%x\n", \
-        (unsigned) (x), (unsigned) (y)); assert((x) == (y)); } } while (0)
-
-
-// Called after audio recorder fills a buffer with data
-static void recorderCallback(SLAndroidSimpleBufferQueueItf caller __unused, void *context) {
-    sles_data *pSles = (sles_data*) context;
-    if (pSles != NULL) {
-
-
-
-        SLresult result;
-
-        pthread_mutex_lock(&(pSles->mutex));
-        //ee  SLES_PRINTF("<R");
-
-        // We should only be called when a recording buffer is done
-        assert(pSles->rxFront <= pSles->rxBufCount);
-        assert(pSles->rxRear <= pSles->rxBufCount);
-        assert(pSles->rxFront != pSles->rxRear);
-        char *buffer = pSles->rxBuffers[pSles->rxFront];
-
-        // Remove buffer from record queue
-        if (++pSles->rxFront > pSles->rxBufCount) {
-            pSles->rxFront = 0;
-        }
-
-        // Throw out first frames
-        if (pSles->numFramesToIgnore) {
-            SLuint32 framesToErase = pSles->numFramesToIgnore;
-            if (framesToErase > pSles->bufSizeInFrames) {
-                framesToErase = pSles->bufSizeInFrames;
-            }
-            pSles->numFramesToIgnore -= framesToErase;
-            // FIXME: this assumes each sample is a short
-            memset(buffer, 0, framesToErase * pSles->channels * sizeof(short));
-        }
-
-        ssize_t actual = audio_utils_fifo_write(&(pSles->fifo), buffer,
-                (size_t) pSles->bufSizeInFrames);
-        if (actual != (ssize_t) pSles->bufSizeInFrames) {
-            write(1, "?", 1);
-        }
-
-        // This is called by a realtime (SCHED_FIFO) thread,
-        // and it is unsafe to do I/O as it could block for unbounded time.
-        // Flash filesystem is especially notorious for blocking.
-        if (pSles->fifo2Buffer != NULL) {
-            actual = audio_utils_fifo_write(&(pSles->fifo2), buffer,
-                    (size_t) pSles->bufSizeInFrames);
-            if (actual != (ssize_t) pSles->bufSizeInFrames) {
-                write(1, "?", 1);
-            }
-        }
-
-        // Enqueue this same buffer for the recorder to fill again.
-        result = (*(pSles->recorderBufferQueue))->Enqueue(pSles->recorderBufferQueue, buffer,
-                pSles->bufSizeInBytes);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-        // Update our model of the record queue
-        SLuint32 rxRearNext = pSles->rxRear+1;
-        if (rxRearNext > pSles->rxBufCount) {
-            rxRearNext = 0;
-        }
-        assert(rxRearNext != pSles->rxFront);
-        pSles->rxBuffers[pSles->rxRear] = buffer;
-        pSles->rxRear = rxRearNext;
-
-
-
-        //ee  SLES_PRINTF("r>");
-        pthread_mutex_unlock(&(pSles->mutex));
-
-    } //pSles not null
-}
-
-
-// Called after audio player empties a buffer of data
-static void playerCallback(SLBufferQueueItf caller __unused, void *context) {
-    sles_data *pSles = (sles_data*) context;
-    if (pSles != NULL) {
-
-        SLresult result;
-
-        pthread_mutex_lock(&(pSles->mutex));
-        //ee  SLES_PRINTF("<P");
-
-        // Get the buffer that just finished playing
-        assert(pSles->txFront <= pSles->txBufCount);
-        assert(pSles->txRear <= pSles->txBufCount);
-        assert(pSles->txFront != pSles->txRear);
-        char *buffer = pSles->txBuffers[pSles->txFront];
-        if (++pSles->txFront > pSles->txBufCount) {
-            pSles->txFront = 0;
-        }
-
-
-        ssize_t actual = audio_utils_fifo_read(&(pSles->fifo), buffer, pSles->bufSizeInFrames);
-        if (actual != (ssize_t) pSles->bufSizeInFrames) {
-            write(1, "/", 1);
-            // on underrun from pipe, substitute silence
-            memset(buffer, 0, pSles->bufSizeInFrames * pSles->channels * sizeof(short));
-        }
-
-        if (pSles->injectImpulse == -1) {
-            // Experimentally, a single frame impulse was insufficient to trigger feedback.
-            // Also a Nyquist frequency signal was also insufficient, probably because
-            // the response of output and/or input path was not adequate at high frequencies.
-            // This short burst of a few cycles of square wave at Nyquist/4 was found to work well.
-            for (unsigned i = 0; i < pSles->bufSizeInFrames / 8; i += 8) {
-                for (int j = 0; j < 8; j++) {
-                    for (unsigned k = 0; k < pSles->channels; k++) {
-                        ((short *)buffer)[(i+j)*pSles->channels+k] = j < 4 ? 0x7FFF : 0x8000;
-                    }
-                }
-            }
-            pSles->injectImpulse = 0;
-        }
-
-        // Enqueue the filled buffer for playing
-        result = (*(pSles->playerBufferQueue))->Enqueue(pSles->playerBufferQueue, buffer,
-                pSles->bufSizeInBytes);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-        // Update our model of the player queue
-        assert(pSles->txFront <= pSles->txBufCount);
-        assert(pSles->txRear <= pSles->txBufCount);
-        SLuint32 txRearNext = pSles->txRear+1;
-        if (txRearNext > pSles->txBufCount) {
-            txRearNext = 0;
-        }
-        assert(txRearNext != pSles->txFront);
-        pSles->txBuffers[pSles->txRear] = buffer;
-        pSles->txRear = txRearNext;
-
-
-        //ee    SLES_PRINTF("p>");
-        pthread_mutex_unlock(&(pSles->mutex));
-
-    } //pSles not null
-}
-
-int slesCreateServer(sles_data *pSles, int samplingRate, int frameCount,
-                     int micSource, int numFramesToIgnore) {
-    int status = SLES_FAIL;
-
-    if (pSles == NULL) {
-        return status;
-    }
-
-    //        adb shell slesTest_feedback -r1 -t1 -s48000 -f240 -i300 -e3 -o/sdcard/log.wav
-    //            r1 and t1 are the receive and transmit buffer counts, typically 1
-    //            s is the sample rate, typically 48000 or 44100
-    //            f is the frame count per buffer, typically 240 or 256
-    //            i is the number of milliseconds before impulse.  You may need to adjust this.
-    //            e is number of seconds to record
-    //            o is output .wav file name
-
-
-    //        // default values
-    //        SLuint32 rxBufCount = 1;     // -r#
-    //        SLuint32 txBufCount = 1;     // -t#
-    //        SLuint32 bufSizeInFrames = 240;  // -f#
-    //        SLuint32 channels = 1;       // -c#
-    //        SLuint32 sampleRate = 48000; // -s#
-    //        SLuint32 exitAfterSeconds = 3; // -e#
-    //        SLuint32 freeBufCount = 0;   // calculated
-    //        SLuint32 bufSizeInBytes = 0; // calculated
-    //        int injectImpulse = 300; // -i#i
-    //
-    //        // Storage area for the buffer queues
-    //        char **rxBuffers;
-    //        char **txBuffers;
-    //        char **freeBuffers;
-    //
-    //        // Buffer indices
-    //        SLuint32 rxFront;    // oldest recording
-    //        SLuint32 rxRear;     // next to be recorded
-    //        SLuint32 txFront;    // oldest playing
-    //        SLuint32 txRear;     // next to be played
-    //        SLuint32 freeFront;  // oldest free
-    //        SLuint32 freeRear;   // next to be freed
-    //
-    //        audio_utils_fifo fifo; //(*)
-    //        SLAndroidSimpleBufferQueueItf recorderBufferQueue;
-    //        SLBufferQueueItf playerBufferQueue;
-
-    // default values
-    pSles->rxBufCount = 1;     // -r#
-    pSles->txBufCount = 1;     // -t#
-    pSles->bufSizeInFrames = frameCount;//240;  // -f#
-    pSles->channels = 1;       // -c#
-    pSles->sampleRate = samplingRate;//48000; // -s#
-    pSles->exitAfterSeconds = 3; // -e#
-    pSles->freeBufCount = 0;   // calculated
-    pSles->bufSizeInBytes = 0; // calculated
-    pSles->injectImpulse = 300; // -i#i
-
-    if (numFramesToIgnore > 0) {
-        pSles->numFramesToIgnore = numFramesToIgnore;
-    } else {
-        pSles->numFramesToIgnore = 0;
-    }
-
-    // Storage area for the buffer queues
-    //        char **rxBuffers;
-    //        char **txBuffers;
-    //        char **freeBuffers;
-
-    // Buffer indices
-/*
-    pSles->rxFront;    // oldest recording
-    pSles->rxRear;     // next to be recorded
-    pSles->txFront;    // oldest playing
-    pSles->txRear;     // next to be played
-    pSles->freeFront;  // oldest free
-    pSles->freeRear;   // next to be freed
-
-    pSles->fifo; //(*)
-*/
-    pSles->fifo2Buffer = NULL;
-
-    // compute total free buffers as -r plus -t
-    pSles->freeBufCount = pSles->rxBufCount + pSles->txBufCount;
-    // compute buffer size
-    pSles->bufSizeInBytes = pSles->channels * pSles->bufSizeInFrames * sizeof(short);
-
-    // Initialize free buffers
-    pSles->freeBuffers = (char **) calloc(pSles->freeBufCount+1, sizeof(char *));
-    unsigned j;
-    for (j = 0; j < pSles->freeBufCount; ++j) {
-        pSles->freeBuffers[j] = (char *) malloc(pSles->bufSizeInBytes);
-    }
-    pSles->freeFront = 0;
-    pSles->freeRear = pSles->freeBufCount;
-    pSles->freeBuffers[j] = NULL;
-
-    // Initialize record queue
-    pSles->rxBuffers = (char **) calloc(pSles->rxBufCount+1, sizeof(char *));
-    pSles->rxFront = 0;
-    pSles->rxRear = 0;
-
-    // Initialize play queue
-    pSles->txBuffers = (char **) calloc(pSles->txBufCount+1, sizeof(char *));
-    pSles->txFront = 0;
-    pSles->txRear = 0;
-
-    size_t frameSize = pSles->channels * sizeof(short);
-#define FIFO_FRAMES 1024
-    pSles->fifoBuffer = new short[FIFO_FRAMES * pSles->channels];
-    audio_utils_fifo_init(&(pSles->fifo), FIFO_FRAMES, frameSize, pSles->fifoBuffer);
-
-    //        SNDFILE *sndfile;
-    //        if (outFileName != NULL) {
-    // create .wav writer
-    //            SF_INFO info;
-    //            info.frames = 0;
-    //            info.samplerate = sampleRate;
-    //            info.channels = channels;
-    //            info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
-    //            sndfile = sf_open(outFileName, SFM_WRITE, &info);
-    //            if (sndfile != NULL) {
-#define FIFO2_FRAMES 65536
-    pSles->fifo2Buffer = new short[FIFO2_FRAMES * pSles->channels];
-    audio_utils_fifo_init(&(pSles->fifo2), FIFO2_FRAMES, frameSize, pSles->fifo2Buffer);
-    //            } else {
-    //                fprintf(stderr, "sf_open failed\n");
-    //            }
-    //        } else {
-    //            sndfile = NULL;
-    //        }
-
-    SLresult result;
-
-    // create engine
-    result = slCreateEngine(&(pSles->engineObject), 0, NULL, 0, NULL, NULL);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->engineObject))->Realize(pSles->engineObject, SL_BOOLEAN_FALSE);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    SLEngineItf engineEngine;
-    result = (*(pSles->engineObject))->GetInterface(pSles->engineObject, SL_IID_ENGINE,
-            &engineEngine);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // create output mix
-    result = (*engineEngine)->CreateOutputMix(engineEngine, &(pSles->outputmixObject), 0, NULL,
-            NULL);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->outputmixObject))->Realize(pSles->outputmixObject, SL_BOOLEAN_FALSE);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // create an audio player with buffer queue source and output mix sink
-    SLDataSource audiosrc;
-    SLDataSink audiosnk;
-    SLDataFormat_PCM pcm;
-    SLDataLocator_OutputMix locator_outputmix;
-    SLDataLocator_BufferQueue locator_bufferqueue_tx;
-    locator_bufferqueue_tx.locatorType = SL_DATALOCATOR_BUFFERQUEUE;
-    locator_bufferqueue_tx.numBuffers = pSles->txBufCount;
-    locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
-    locator_outputmix.outputMix = pSles->outputmixObject;
-    pcm.formatType = SL_DATAFORMAT_PCM;
-    pcm.numChannels = pSles->channels;
-    pcm.samplesPerSec = pSles->sampleRate * 1000;
-    pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
-    pcm.containerSize = 16;
-    pcm.channelMask = pSles->channels == 1 ? SL_SPEAKER_FRONT_CENTER :
-            (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
-    pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
-    audiosrc.pLocator = &locator_bufferqueue_tx;
-    audiosrc.pFormat = &pcm;
-    audiosnk.pLocator = &locator_outputmix;
-    audiosnk.pFormat = NULL;
-    pSles->playerObject = NULL;
-    pSles->recorderObject = NULL;
-    SLInterfaceID ids_tx[1] = {SL_IID_BUFFERQUEUE};
-    SLboolean flags_tx[1] = {SL_BOOLEAN_TRUE};
-    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &(pSles->playerObject),
-            &audiosrc, &audiosnk, 1, ids_tx, flags_tx);
-    if (SL_RESULT_CONTENT_UNSUPPORTED == result) {
-        fprintf(stderr, "Could not create audio player (result %x), check sample rate\n",
-                result);
-        SLES_PRINTF("ERROR: Could not create audio player (result %x), check sample rate\n",
-                result);
-        goto cleanup;
-    }
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->playerObject))->Realize(pSles->playerObject, SL_BOOLEAN_FALSE);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    SLPlayItf playerPlay;
-    result = (*(pSles->playerObject))->GetInterface(pSles->playerObject, SL_IID_PLAY,
-            &playerPlay);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->playerObject))->GetInterface(pSles->playerObject, SL_IID_BUFFERQUEUE,
-            &(pSles->playerBufferQueue));
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->playerBufferQueue))->RegisterCallback(pSles->playerBufferQueue,
-            playerCallback, pSles);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // Enqueue some zero buffers for the player
-    for (j = 0; j < pSles->txBufCount; ++j) {
-
-        // allocate a free buffer
-        assert(pSles->freeFront != pSles->freeRear);
-        char *buffer = pSles->freeBuffers[pSles->freeFront];
-        if (++pSles->freeFront > pSles->freeBufCount) {
-            pSles->freeFront = 0;
-        }
-
-        // put on play queue
-        SLuint32 txRearNext = pSles->txRear + 1;
-        if (txRearNext > pSles->txBufCount) {
-            txRearNext = 0;
-        }
-        assert(txRearNext != pSles->txFront);
-        pSles->txBuffers[pSles->txRear] = buffer;
-        pSles->txRear = txRearNext;
-        result = (*(pSles->playerBufferQueue))->Enqueue(pSles->playerBufferQueue,
-                buffer, pSles->bufSizeInBytes);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    }
-
-    result = (*playerPlay)->SetPlayState(playerPlay, SL_PLAYSTATE_PLAYING);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // Create an audio recorder with microphone device source and buffer queue sink.
-    // The buffer queue as sink is an Android-specific extension.
-
-    SLDataLocator_IODevice locator_iodevice;
-    SLDataLocator_AndroidSimpleBufferQueue locator_bufferqueue_rx;
-    locator_iodevice.locatorType = SL_DATALOCATOR_IODEVICE;
-    locator_iodevice.deviceType = SL_IODEVICE_AUDIOINPUT;
-    locator_iodevice.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT;
-    locator_iodevice.device = NULL;
-    audiosrc.pLocator = &locator_iodevice;
-    audiosrc.pFormat = NULL;
-    locator_bufferqueue_rx.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
-    locator_bufferqueue_rx.numBuffers = pSles->rxBufCount;
-    audiosnk.pLocator = &locator_bufferqueue_rx;
-    audiosnk.pFormat = &pcm;
-    {
-        SLInterfaceID ids_rx[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
-                SL_IID_ANDROIDCONFIGURATION};
-        SLboolean flags_rx[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
-        result = (*engineEngine)->CreateAudioRecorder(engineEngine, &(pSles->recorderObject),
-                &audiosrc, &audiosnk, 2, ids_rx, flags_rx);
-        if (SL_RESULT_SUCCESS != result) {
-            fprintf(stderr, "Could not create audio recorder (result %x), "
-                    "check sample rate and channel count\n", result);
-            status = SLES_FAIL;
-
-            SLES_PRINTF("ERROR: Could not create audio recorder (result %x), "
-                    "check sample rate and channel count\n", result);
-            goto cleanup;
-        }
-    }
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    {
-        /* Get the Android configuration interface which is explicit */
-        SLAndroidConfigurationItf configItf;
-        result = (*(pSles->recorderObject))->GetInterface(pSles->recorderObject,
-                SL_IID_ANDROIDCONFIGURATION, (void*)&configItf);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-        SLuint32 presetValue = micSource;
-        /* Use the configuration interface to configure the recorder before it's realized */
-        if (presetValue != SL_ANDROID_RECORDING_PRESET_NONE) {
-            result = (*configItf)->SetConfiguration(configItf, SL_ANDROID_KEY_RECORDING_PRESET,
-                    &presetValue, sizeof(SLuint32));
-            ASSERT_EQ(SL_RESULT_SUCCESS, result);
-        }
-
-    }
-
-    result = (*(pSles->recorderObject))->Realize(pSles->recorderObject, SL_BOOLEAN_FALSE);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    SLRecordItf recorderRecord;
-    result = (*(pSles->recorderObject))->GetInterface(pSles->recorderObject, SL_IID_RECORD,
-            &recorderRecord);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->recorderObject))->GetInterface(pSles->recorderObject,
-            SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &(pSles->recorderBufferQueue));
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    result = (*(pSles->recorderBufferQueue))->RegisterCallback(pSles->recorderBufferQueue,
-            recorderCallback, pSles);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // Enqueue some empty buffers for the recorder
-    for (j = 0; j < pSles->rxBufCount; ++j) {
-
-        // allocate a free buffer
-        assert(pSles->freeFront != pSles->freeRear);
-        char *buffer = pSles->freeBuffers[pSles->freeFront];
-        if (++pSles->freeFront > pSles->freeBufCount) {
-            pSles->freeFront = 0;
-        }
-
-        // put on record queue
-        SLuint32 rxRearNext = pSles->rxRear + 1;
-        if (rxRearNext > pSles->rxBufCount) {
-            rxRearNext = 0;
-        }
-        assert(rxRearNext != pSles->rxFront);
-        pSles->rxBuffers[pSles->rxRear] = buffer;
-        pSles->rxRear = rxRearNext;
-        result = (*(pSles->recorderBufferQueue))->Enqueue(pSles->recorderBufferQueue,
-                buffer, pSles->bufSizeInBytes);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    }
-
-    // Kick off the recorder
-    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    // Tear down the objects and exit
-    status = SLES_SUCCESS;
-    cleanup:
-    SLES_PRINTF("Finished initialization with status: %d", status);
-
-    return status;
-}
-
-int slesProcessNext(sles_data *pSles, double *pSamples, long maxSamples) {
-    //int status = SLES_FAIL;
-
-    SLES_PRINTF("slesProcessNext: pSles = %p, currentSample: %p,  maxSamples = %ld", pSles,
-            pSamples, maxSamples);
-
-    int samplesRead = 0;
-
-    int currentSample = 0;
-    double *pCurrentSample = pSamples;
-    int maxValue = 32768;
-
-    if (pSles == NULL) {
-        return samplesRead;
-    }
-
-    SLresult result;
-    for (int i = 0; i < 10; i++) {
-        usleep(100000);
-        if (pSles->fifo2Buffer != NULL) {
-            for (;;) {
-                short buffer[pSles->bufSizeInFrames * pSles->channels];
-                ssize_t actual = audio_utils_fifo_read(&(pSles->fifo2), buffer,
-                        pSles->bufSizeInFrames);
-                if (actual <= 0)
-                    break;
-                {
-                    for (int jj =0; jj<actual && currentSample < maxSamples; jj++) {
-                        *(pCurrentSample++) = ((double)buffer[jj])/maxValue;
-                        currentSample++;
-                    }
-                }
-                samplesRead +=actual;
-            }
-        }
-        if (pSles->injectImpulse > 0) {
-            if (pSles->injectImpulse <= 100) {
-                pSles->injectImpulse = -1;
-                write(1, "I", 1);
-            } else {
-                if ((pSles->injectImpulse % 1000) < 100) {
-                    write(1, "i", 1);
-                }
-                pSles->injectImpulse -= 100;
-            }
-        } else if (i == 9) {
-            write(1, ".", 1);
-        }
-    }
-    SLBufferQueueState playerBQState;
-    result = (*(pSles->playerBufferQueue))->GetState(pSles->playerBufferQueue,
-            &playerBQState);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    SLAndroidSimpleBufferQueueState recorderBQState;
-    result = (*(pSles->recorderBufferQueue))->GetState(pSles->recorderBufferQueue,
-            &recorderBQState);
-    ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-    SLES_PRINTF("End of slesProcessNext: pSles = %p, samplesRead = %d, maxSamples= %ld", pSles,
-            samplesRead, maxSamples);
-
-    return samplesRead;
-}
-
-int slesDestroyServer(sles_data *pSles) {
-    int status = SLES_FAIL;
-
-    SLES_PRINTF("Start slesDestroyServer: pSles = %p", pSles);
-    if (pSles == NULL) {
-        return status;
-    }
-
-    if (NULL != pSles->playerObject) {
-
-        SLES_PRINTF("stopping player...");
-        SLPlayItf playerPlay;
-        SLresult result = (*(pSles->playerObject))->GetInterface(pSles->playerObject,
-                SL_IID_PLAY, &playerPlay);
-
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-        //stop player and recorder if they exist
-        result = (*playerPlay)->SetPlayState(playerPlay, SL_PLAYSTATE_STOPPED);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    }
-
-    if (NULL != pSles->recorderObject) {
-        SLES_PRINTF("stopping recorder...");
-        SLRecordItf recorderRecord;
-        SLresult result = (*(pSles->recorderObject))->GetInterface(pSles->recorderObject,
-                SL_IID_RECORD, &recorderRecord);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-
-        result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
-        ASSERT_EQ(SL_RESULT_SUCCESS, result);
-    }
-
-    usleep(1000);
-
-    audio_utils_fifo_deinit(&(pSles->fifo));
-    delete[] pSles->fifoBuffer;
-
-    SLES_PRINTF("slesDestroyServer 2");
-
-    //        if (sndfile != NULL) {
-    audio_utils_fifo_deinit(&(pSles->fifo2));
-    delete[] pSles->fifo2Buffer;
-
-    SLES_PRINTF("slesDestroyServer 3");
-
-    //            sf_close(sndfile);
-    //        }
-    if (NULL != pSles->playerObject) {
-        (*(pSles->playerObject))->Destroy(pSles->playerObject);
-    }
-
-    SLES_PRINTF("slesDestroyServer 4");
-
-    if (NULL != pSles->recorderObject) {
-        (*(pSles->recorderObject))->Destroy(pSles->recorderObject);
-    }
-
-    SLES_PRINTF("slesDestroyServer 5");
-
-    (*(pSles->outputmixObject))->Destroy(pSles->outputmixObject);
-    SLES_PRINTF("slesDestroyServer 6");
-    (*(pSles->engineObject))->Destroy(pSles->engineObject);
-    SLES_PRINTF("slesDestroyServer 7");
-
-    //        free(pSles);
-    //        pSles=NULL;
-
-    status = SLES_SUCCESS;
-
-    SLES_PRINTF("End slesDestroyServer: status = %d", status);
-    return status;
-}
-
diff --git a/apps/CtsVerifier/jni/audio_loopback/sles.h b/apps/CtsVerifier/jni/audio_loopback/sles.h
deleted file mode 100644
index 607f724..0000000
--- a/apps/CtsVerifier/jni/audio_loopback/sles.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <SLES/OpenSLES.h>
-#include <SLES/OpenSLES_Android.h>
-#include <pthread.h>
-#include <android/log.h>
-
-#ifndef _Included_org_drrickorang_loopback_sles
-#define _Included_org_drrickorang_loopback_sles
-
-//struct audio_utils_fifo;
-#define SLES_PRINTF(...)  __android_log_print(ANDROID_LOG_INFO, "sles_jni", __VA_ARGS__);
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-#include <audio_utils/fifo.h>
-
-typedef struct {
-    SLuint32 rxBufCount;     // -r#
-    SLuint32 txBufCount;     // -t#
-    SLuint32 bufSizeInFrames;  // -f#
-    SLuint32 channels;       // -c#
-    SLuint32 sampleRate; // -s#
-    SLuint32 exitAfterSeconds; // -e#
-    SLuint32 freeBufCount;   // calculated
-    SLuint32 bufSizeInBytes; // calculated
-    int injectImpulse; // -i#i
-    SLuint32 numFramesToIgnore;
-
-    // Storage area for the buffer queues
-    char **rxBuffers;
-    char **txBuffers;
-    char **freeBuffers;
-
-    // Buffer indices
-    SLuint32 rxFront;    // oldest recording
-    SLuint32 rxRear;     // next to be recorded
-    SLuint32 txFront;    // oldest playing
-    SLuint32 txRear;     // next to be played
-    SLuint32 freeFront;  // oldest free
-    SLuint32 freeRear;   // next to be freed
-
-    struct audio_utils_fifo fifo; //(*)
-    struct audio_utils_fifo fifo2;
-    short *fifo2Buffer;
-    short *fifoBuffer;
-    SLAndroidSimpleBufferQueueItf recorderBufferQueue;
-    SLBufferQueueItf playerBufferQueue;
-
-    pthread_mutex_t mutex;// = PTHREAD_MUTEX_INITIALIZER;
-
-    //other things that belong here
-    SLObjectItf playerObject;
-    SLObjectItf recorderObject;
-    SLObjectItf outputmixObject;
-    SLObjectItf engineObject;
-} sles_data;
-
-enum {
-    SLES_SUCCESS = 0,
-    SLES_FAIL = 1,
-} SLES_STATUS_ENUM;
-
-int slesInit(sles_data ** ppSles, int samplingRate, int frameCount,
-             int micSource, int numFramesToIgnore);
-
-//note the double pointer to properly free the memory of the structure
-int slesDestroy(sles_data ** ppSles);
-
-///full
-int slesFull(sles_data *pSles);
-
-int slesCreateServer(sles_data *pSles, int samplingRate, int frameCount, int micSource, int numFramesToIgnore);
-int slesProcessNext(sles_data *pSles, double *pSamples, long maxSamples);
-int slesDestroyServer(sles_data *pSles);
-
-#ifdef __cplusplus
-}
-#endif
-#endif //_Included_org_drrickorang_loopback_sles
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/audio/AudioLoopbackActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/audio/AudioLoopbackActivity.java
index efffe73..e4921ed 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/audio/AudioLoopbackActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/audio/AudioLoopbackActivity.java
@@ -27,6 +27,7 @@
 import android.media.AudioDeviceInfo;
 import android.media.AudioManager;
 import android.media.AudioTrack;
+import android.media.MediaRecorder;
 
 import android.os.Bundle;
 import android.os.Handler;
@@ -50,15 +51,14 @@
 
     public static final int BYTES_PER_FRAME = 2;
 
-    NativeAudioThread nativeAudioThread = null;
+    NativeAnalyzerThread mNativeAnalyzerThread = null;
 
     private int mSamplingRate = 44100;
     private int mMinBufferSizeInFrames = 0;
     private static final double CONFIDENCE_THRESHOLD = 0.6;
-    private Correlation mCorrelation = new Correlation();
 
-    // TODO: remove this variable
-    private int mNumFramesToIgnore = 0;
+    private double mLatencyMillis;
+    private double mConfidence;
 
     OnBtnClickListener mBtnClickListener = new OnBtnClickListener();
     Context mContext;
@@ -90,13 +90,13 @@
                 case R.id.audio_general_headset_yes:
                     Log.i(TAG, "User confirms Headset Port existence");
                     mLoopbackPlugReady.setEnabled(true);
-                    recordHeasetPortFound(true);
+                    recordHeadsetPortFound(true);
                     mHeadsetPortYes.setEnabled(false);
                     mHeadsetPortNo.setEnabled(false);
                     break;
                 case R.id.audio_general_headset_no:
                     Log.i(TAG, "User denies Headset Port existence");
-                    recordHeasetPortFound(false);
+                    recordHeadsetPortFound(false);
                     getPassButton().setEnabled(true);
                     mHeadsetPortYes.setEnabled(false);
                     mHeadsetPortNo.setEnabled(false);
@@ -158,6 +158,7 @@
         setPassFailButtonClickListeners();
         getPassButton().setEnabled(false);
         setInfoResources(R.string.audio_loopback_test, R.string.audio_loopback_info, -1);
+
     }
 
     /**
@@ -191,41 +192,43 @@
      */
     private void startAudioTest() {
         getPassButton().setEnabled(false);
+        mTestButton.setEnabled(false);
+        mLatencyMillis = 0.0;
+        mConfidence = 0.0;
 
-        //get system defaults for sampling rate, buffers.
-        AudioManager am = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
-        String value = am.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
-        mMinBufferSizeInFrames = Integer.parseInt(value);
-
-        int minBufferSizeInBytes = BYTES_PER_FRAME * mMinBufferSizeInFrames;
-
-        mSamplingRate = AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
-
-        Log.i(TAG, String.format("startAudioTest sr:%d , buffer:%d frames",
-                mSamplingRate, mMinBufferSizeInFrames));
-
-        nativeAudioThread = new NativeAudioThread();
-        if (nativeAudioThread != null) {
-            nativeAudioThread.setMessageHandler(mMessageHandler);
-            nativeAudioThread.mSessionId = 0;
-            nativeAudioThread.setParams(mSamplingRate,
-                    minBufferSizeInBytes,
-                    minBufferSizeInBytes,
-                    0x03 /*voice recognition*/,
-                    mNumFramesToIgnore);
-            nativeAudioThread.start();
+        mNativeAnalyzerThread = new NativeAnalyzerThread();
+        if (mNativeAnalyzerThread != null) {
+            mNativeAnalyzerThread.setMessageHandler(mMessageHandler);
+            // This value matches AAUDIO_INPUT_PRESET_VOICE_RECOGNITION
+            mNativeAnalyzerThread.setInputPreset(MediaRecorder.AudioSource.VOICE_RECOGNITION);
+            mNativeAnalyzerThread.startTest();
 
             try {
                 Thread.sleep(200);
             } catch (InterruptedException e) {
                 e.printStackTrace();
             }
-
-            nativeAudioThread.runTest();
-
         }
     }
 
+    private void handleTestCompletion() {
+        recordTestResults();
+        boolean resultValid = mConfidence >= CONFIDENCE_THRESHOLD
+                && mLatencyMillis > 1.0;
+        getPassButton().setEnabled(resultValid);
+
+        // Make sure the test thread is finished. It should already be done.
+        if (mNativeAnalyzerThread != null) {
+            try {
+                mNativeAnalyzerThread.stopTest(2 * 1000);
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+        showWait(false);
+        mTestButton.setEnabled(true);
+    }
+
     /**
      * handler for messages from audio thread
      */
@@ -233,45 +236,35 @@
         public void handleMessage(Message msg) {
             super.handleMessage(msg);
             switch(msg.what) {
-                case NativeAudioThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED:
+                case NativeAnalyzerThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED:
                     Log.v(TAG,"got message native rec started!!");
                     showWait(true);
                     mResultText.setText("Test Running...");
                     break;
-                case NativeAudioThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR:
+                case NativeAnalyzerThread.NATIVE_AUDIO_THREAD_MESSAGE_OPEN_ERROR:
                     Log.v(TAG,"got message native rec can't start!!");
-                    showWait(false);
-                    mResultText.setText("Test Error.");
+                    mResultText.setText("Test Error opening streams.");
+                    handleTestCompletion();
                     break;
-                case NativeAudioThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE:
-                case NativeAudioThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS:
-                    if (nativeAudioThread != null) {
-                        Log.v(TAG,"Finished recording.");
-                        double [] waveData = nativeAudioThread.getWaveData();
-                        mCorrelation.computeCorrelation(waveData, mSamplingRate);
-                        mResultText.setText(String.format(
-                                "Test Finished\nLatency:%.2f ms\nConfidence: %.2f",
-                                mCorrelation.mEstimatedLatencyMs,
-                                mCorrelation.mEstimatedLatencyConfidence));
-
-                        recordTestResults();
-                        if (mCorrelation.mEstimatedLatencyConfidence >= CONFIDENCE_THRESHOLD) {
-                            getPassButton().setEnabled(true);
-                        }
-
-                        //close
-                        if (nativeAudioThread != null) {
-                            nativeAudioThread.isRunning = false;
-                            try {
-                                nativeAudioThread.finish();
-                                nativeAudioThread.join();
-                            } catch (InterruptedException e) {
-                                e.printStackTrace();
-                            }
-                            nativeAudioThread = null;
-                        }
-                        showWait(false);
+                case NativeAnalyzerThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR:
+                    Log.v(TAG,"got message native rec error!!");
+                    mResultText.setText("Test Error while recording.");
+                    handleTestCompletion();
+                    break;
+                case NativeAnalyzerThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS:
+                    mResultText.setText("Test FAILED due to errors.");
+                    handleTestCompletion();
+                    break;
+                case NativeAnalyzerThread.NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE:
+                    if (mNativeAnalyzerThread != null) {
+                        mLatencyMillis = mNativeAnalyzerThread.getLatencyMillis();
+                        mConfidence = mNativeAnalyzerThread.getConfidence();
                     }
+                    mResultText.setText(String.format(
+                            "Test Finished\nLatency:%.2f ms\nConfidence: %.2f",
+                            mLatencyMillis,
+                            mConfidence));
+                    handleTestCompletion();
                     break;
                 default:
                     break;
@@ -286,13 +279,13 @@
 
         getReportLog().addValue(
                 "Estimated Latency",
-                mCorrelation.mEstimatedLatencyMs,
+                mLatencyMillis,
                 ResultType.LOWER_BETTER,
                 ResultUnit.MS);
 
         getReportLog().addValue(
                 "Confidence",
-                mCorrelation.mEstimatedLatencyConfidence,
+                mConfidence,
                 ResultType.HIGHER_BETTER,
                 ResultUnit.NONE);
 
@@ -318,7 +311,7 @@
         Log.v(TAG,"Results Recorded");
     }
 
-    private void recordHeasetPortFound(boolean found) {
+    private void recordHeadsetPortFound(boolean found) {
         getReportLog().addValue(
                 "User Reported Headset Port",
                 found ? 1.0 : 0,
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/audio/Correlation.java b/apps/CtsVerifier/src/com/android/cts/verifier/audio/Correlation.java
deleted file mode 100644
index c653d1d..0000000
--- a/apps/CtsVerifier/src/com/android/cts/verifier/audio/Correlation.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.cts.verifier.audio;
-
-import android.util.Log;
-
-
-public class Correlation {
-
-    private int mBlockSize = 4096;
-    private int mSamplingRate = 44100;
-    private double [] mDataDownsampled = new double [mBlockSize];
-    private double [] mDataAutocorrelated = new double[mBlockSize];
-
-    public double mEstimatedLatencySamples = 0;
-    public double mEstimatedLatencyMs = 0;
-    public double mEstimatedLatencyConfidence = 0.0;
-
-    private double mAmplitudeThreshold = 0.001;  // 0.001 = -60 dB noise
-
-    public void init(int blockSize, int samplingRate) {
-        mBlockSize = blockSize;
-        mSamplingRate = samplingRate;
-    }
-
-    public boolean computeCorrelation(double [] data, int samplingRate) {
-        boolean status = false;
-        log("Started Auto Correlation for data with " + data.length + " points");
-        mSamplingRate = samplingRate;
-
-        downsampleData(data, mDataDownsampled, mAmplitudeThreshold);
-
-        //correlation vector
-        autocorrelation(mDataDownsampled, mDataAutocorrelated);
-
-        int N = data.length; //all samples available
-        double groupSize =  (double) N / mBlockSize;  //samples per downsample point.
-
-        double maxValue = 0;
-        int maxIndex = -1;
-
-        double minLatencyMs = 8; //min latency expected. This algorithm should be improved.
-        int minIndex = (int)(0.5 + minLatencyMs * mSamplingRate / (groupSize*1000));
-
-        double average = 0;
-        double rms = 0;
-        //find max
-        for (int i=minIndex; i<mDataAutocorrelated.length; i++) {
-            average += mDataAutocorrelated[i];
-            rms += mDataAutocorrelated[i]*mDataAutocorrelated[i];
-            if (mDataAutocorrelated[i] > maxValue) {
-                maxValue = mDataAutocorrelated[i];
-                maxIndex = i;
-            }
-        }
-
-        rms = Math.sqrt(rms/mDataAutocorrelated.length);
-        average = average/mDataAutocorrelated.length;
-        log(String.format(" Maxvalue %f, max Index : %d/%d (%d)  minIndex=%d",maxValue, maxIndex,
-                mDataAutocorrelated.length, data.length, minIndex));
-
-        log(String.format("  average : %.3f  rms: %.3f", average, rms));
-
-        mEstimatedLatencyConfidence = 0.0;
-        if (average>0) {
-            double factor = 3.0;
-
-            double raw = (rms-average) /(factor*average);
-            log(String.format("Raw: %.3f",raw));
-            mEstimatedLatencyConfidence = Math.max(Math.min(raw, 1.0),0.0);
-        }
-
-        log(String.format(" ****Confidence: %.2f",mEstimatedLatencyConfidence));
-
-        mEstimatedLatencySamples = maxIndex*groupSize;
-
-        mEstimatedLatencyMs = mEstimatedLatencySamples *1000/mSamplingRate;
-
-        log(String.format(" latencySamples: %.2f  %.2f ms", mEstimatedLatencySamples,
-                mEstimatedLatencyMs));
-
-        status = true;
-        return status;
-    }
-
-    private boolean downsampleData(double [] data, double [] dataDownsampled, double threshold) {
-
-        boolean status = false;
-        // mDataDownsampled = new double[mBlockSize];
-        for (int i=0; i<mBlockSize; i++) {
-            dataDownsampled[i] = 0;
-        }
-
-        int N = data.length; //all samples available
-        double groupSize =  (double) N / mBlockSize;
-
-        int ignored = 0;
-
-        int currentIndex = 0;
-        double nextGroup = groupSize;
-        for (int i = 0; i<N && currentIndex<mBlockSize; i++) {
-
-            if (i> nextGroup) { //advanced to next group.
-                currentIndex++;
-                nextGroup += groupSize;
-            }
-
-            if (currentIndex>=mBlockSize) {
-                break;
-            }
-
-            double value =  Math.abs(data[i]);
-            if (value >= threshold) {
-                dataDownsampled[currentIndex] += value;
-            } else {
-                ignored++;
-            }
-        }
-
-        log(String.format(" Threshold: %.3f, ignored:%d/%d (%.2f)", threshold, ignored, N,
-                (double) ignored/(double)N));
-
-        status = true;
-        return status;
-    }
-
-    private boolean autocorrelation(double [] data, double [] dataOut) {
-        boolean status = false;
-
-        double sumsquared = 0;
-        int N = data.length;
-        for (int i=0; i<N; i++) {
-            double value = data[i];
-            sumsquared += value*value;
-        }
-
-        if (sumsquared>0) {
-            for (int i = 0; i < N; i++) {
-                dataOut[i] = 0;
-                for (int j = 0; j < N - i; j++) {
-
-                    dataOut[i] += data[j] * data[i + j];
-                }
-                dataOut[i] = dataOut[i] / sumsquared;
-            }
-            status = true;
-        }
-
-        return status;
-    }
-
-    private static void log(String msg) {
-        Log.v("Recorder", msg);
-    }
-}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAnalyzerThread.java b/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAnalyzerThread.java
new file mode 100644
index 0000000..42f22aa
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAnalyzerThread.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package com.android.cts.verifier.audio;
+
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.media.MediaRecorder;
+import android.media.AudioRecord;
+import android.util.Log;
+
+import android.os.Handler;
+import android.os.Message;
+
+/**
+ * A thread that runs a native audio loopback analyzer.
+ */
+public class NativeAnalyzerThread {
+    private final int mSecondsToRun = 5;
+    private Handler mMessageHandler;
+    private Thread mThread;
+    private volatile boolean mEnabled = false;
+    private volatile double mLatencyMillis = 0.0;
+    private volatile double mConfidence = 0.0;
+    private int mInputPreset = 0;
+
+    // Message codes posted to the registered Handler to report test progress.
+    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED = 892;
+    static final int NATIVE_AUDIO_THREAD_MESSAGE_OPEN_ERROR = 893;
+    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR = 894;
+    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE = 895;
+    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS = 896;
+
+    public void setInputPreset(int inputPreset) {
+        mInputPreset = inputPreset;
+    }
+
+    //JNI load
+    static {
+        try {
+            System.loadLibrary("audioloopback_jni");
+        } catch (UnsatisfiedLinkError e) {
+            log("Error loading loopback JNI library");
+            e.printStackTrace();
+        }
+
+        /* TODO: gracefully fail/notify if the library can't be loaded */
+    }
+
+    /**
+     * @return native audio context
+     */
+    private native long openAudio(int micSource);
+    private native int startAudio(long audio_context);
+    private native int stopAudio(long audio_context);
+    private native int closeAudio(long audio_context);
+    private native int getError(long audio_context);
+    private native boolean isRecordingComplete(long audio_context);
+    private native int analyze(long audio_context);
+    private native double getLatencyMillis(long audio_context);
+    private native double getConfidence(long audio_context);
+
+    public double getLatencyMillis() {
+        return mLatencyMillis;
+    }
+
+    public double getConfidence() {
+        return mConfidence;
+    }
+
+    /** Starts the loopback test on a background thread (no-op if already running). */
+    public synchronized void startTest() {
+        if (mThread == null) {
+            mEnabled = true;
+            mThread = new Thread(mBackGroundTask);
+            mThread.start();
+        }
+    }
+
+    /** Signals the test thread to stop and waits up to millis for it to exit. */
+    public synchronized void stopTest(int millis) throws InterruptedException {
+        mEnabled = false;
+        if (mThread != null) {
+            mThread.interrupt();
+            mThread.join(millis);
+            mThread = null;
+        }
+    }
+
+    private void sendMessage(int what) {
+        if (mMessageHandler != null) {
+            Message msg = Message.obtain();
+            msg.what = what;
+            mMessageHandler.sendMessage(msg);
+        }
+    }
+
+    private Runnable mBackGroundTask = () -> {
+        mLatencyMillis = 0.0;
+        mConfidence = 0.0;
+        boolean analysisComplete = false;
+
+        log(" Started capture test");
+        sendMessage(NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED);
+
+        long audioContext = openAudio(mInputPreset);
+        log(String.format("audioContext = 0x%X",audioContext));
+
+        if (audioContext == 0) {
+            log(" ERROR at JNI initialization");
+            sendMessage(NATIVE_AUDIO_THREAD_MESSAGE_OPEN_ERROR);
+        } else if (mEnabled) {
+            int result = startAudio(audioContext);
+            if (result < 0) {
+                sendMessage(NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR);
+                mEnabled = false;
+            }
+
+            final long timeoutMillis = mSecondsToRun * 1000;
+            final long startedAtMillis = System.currentTimeMillis();
+            boolean timedOut = false;
+            int loopCounter = 0;
+            while (mEnabled && !timedOut) {
+                result = getError(audioContext);
+                if (result < 0) {
+                    sendMessage(NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR);
+                    break;
+                } else if (isRecordingComplete(audioContext)) {
+                    result = stopAudio(audioContext);
+                    if (result < 0) {
+                        sendMessage(NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR);
+                        break;
+                    }
+
+                    // Analyze the recording and measure latency.
+                    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
+                    result = analyze(audioContext);
+                    if (result < 0) {
+                        break;
+                    } else {
+                        analysisComplete = true;
+                    }
+                    mLatencyMillis = getLatencyMillis(audioContext);
+                    mConfidence = getConfidence(audioContext);
+                    break;
+                } else {
+                    try {
+                        Thread.sleep(100);
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+                }
+                long now = System.currentTimeMillis();
+                timedOut = (now - startedAtMillis) > timeoutMillis;
+            }
+            log("latency: analyze returns " + result);
+            closeAudio(audioContext);
+
+            int what = (analysisComplete && result == 0)
+                    ? NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE
+                    : NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS;
+            sendMessage(what);
+        }
+    };
+
+    public void setMessageHandler(Handler messageHandler) {
+        mMessageHandler = messageHandler;
+    }
+
+    private static void log(String msg) {
+        Log.v("Loopback", msg);
+    }
+
+}  //end thread.
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAudioThread.java b/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAudioThread.java
deleted file mode 100644
index 0bb1298..0000000
--- a/apps/CtsVerifier/src/com/android/cts/verifier/audio/NativeAudioThread.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//package org.drrickorang.loopback;
-
-package com.android.cts.verifier.audio;
-
-import android.media.AudioFormat;
-import android.media.AudioManager;
-import android.media.AudioTrack;
-//import android.media.MediaPlayer;
-import android.media.AudioRecord;
-import android.media.MediaRecorder;
-import android.util.Log;
-
-import android.os.Handler;
-import  android.os.Message;
-
-/**
- * A thread/audio track based audio synth.
- */
-public class NativeAudioThread extends Thread {
-
-    public boolean isRunning = false;
-    double twoPi = 6.28318530718;
-
-    public int mSessionId;
-
-    public double[] mvSamples; //captured samples
-    int mSamplesIndex;
-
-    private final int mSecondsToRun = 2;
-    public int mSamplingRate = 48000;
-    private int mChannelConfigIn = AudioFormat.CHANNEL_IN_MONO;
-    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
-
-    int mMinPlayBufferSizeInBytes = 0;
-    int mMinRecordBuffSizeInBytes = 0;
-    private int mChannelConfigOut = AudioFormat.CHANNEL_OUT_MONO;
-
-    int mMicSource = 0;
-
-    int mNumFramesToIgnore;
-
-//    private double [] samples = new double[50000];
-
-    boolean isPlaying = false;
-    private Handler mMessageHandler;
-    boolean isDestroying = false;
-    boolean hasDestroyingErrors = false;
-
-    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED = 892;
-    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR = 893;
-    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE = 894;
-    static final int NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS = 895;
-
-    public void setParams(int samplingRate, int playBufferInBytes, int recBufferInBytes,
-                          int micSource, int numFramesToIgnore) {
-        mSamplingRate = samplingRate;
-        mMinPlayBufferSizeInBytes = playBufferInBytes;
-        mMinRecordBuffSizeInBytes = recBufferInBytes;
-        mMicSource = micSource;
-        mNumFramesToIgnore = numFramesToIgnore;
-    }
-
-    //JNI load
-    static {
-        try {
-            System.loadLibrary("audioloopback_jni");
-        } catch (UnsatisfiedLinkError e) {
-            log("Error loading loopback JNI library");
-            e.printStackTrace();
-        }
-
-        /* TODO: gracefully fail/notify if the library can't be loaded */
-    }
-
-    //jni calls
-    public native long slesInit(int samplingRate, int frameCount, int micSource,
-                                int numFramesToIgnore);
-    public native int slesProcessNext(long sles_data, double[] samples, long offset);
-    public native int slesDestroy(long sles_data);
-
-    public void run() {
-
-        setPriority(Thread.MAX_PRIORITY);
-        isRunning = true;
-
-        //erase output buffer
-        if (mvSamples != null)
-            mvSamples = null;
-
-        //resize
-        int nNewSize = (int)(1.1* mSamplingRate * mSecondsToRun ); //10% more just in case
-        mvSamples = new double[nNewSize];
-        mSamplesIndex = 0; //reset index
-
-        //clear samples
-        for (int i=0; i<nNewSize; i++) {
-            mvSamples[i] = 0;
-        }
-
-        //start playing
-        isPlaying = true;
-
-
-        log(" Started capture test");
-        if (mMessageHandler != null) {
-            Message msg = Message.obtain();
-            msg.what = NATIVE_AUDIO_THREAD_MESSAGE_REC_STARTED;
-            mMessageHandler.sendMessage(msg);
-        }
-
-
-
-        log(String.format("about to init, sampling rate: %d, buffer:%d", mSamplingRate,
-                mMinPlayBufferSizeInBytes/2 ));
-        long sles_data = slesInit(mSamplingRate, mMinPlayBufferSizeInBytes/2, mMicSource,
-                                  mNumFramesToIgnore);
-        log(String.format("sles_data = 0x%X",sles_data));
-
-        if (sles_data == 0 ) {
-            log(" ERROR at JNI initialization");
-            if (mMessageHandler != null) {
-                Message msg = Message.obtain();
-                msg.what = NATIVE_AUDIO_THREAD_MESSAGE_REC_ERROR;
-                mMessageHandler.sendMessage(msg);
-            }
-        }  else {
-
-            //wait a little bit...
-            try {
-                sleep(10); //just to let it start properly?
-            } catch (InterruptedException e) {
-                e.printStackTrace();
-            }
-
-
-
-            mSamplesIndex = 0;
-            int totalSamplesRead = 0;
-            long offset = 0;
-            for (int ii = 0; ii < mSecondsToRun; ii++) {
-                log(String.format("block %d...", ii));
-                int samplesRead = slesProcessNext(sles_data, mvSamples,offset);
-                totalSamplesRead += samplesRead;
-
-                offset += samplesRead;
-                log(" [" + ii + "] jni samples read:" + samplesRead + "  currentOffset:" + offset);
-            }
-
-            log(String.format(" samplesRead: %d, sampleOffset:%d", totalSamplesRead, offset));
-            log(String.format("about to destroy..."));
-
-            runDestroy(sles_data);
-
-            int maxTry = 20;
-            int tryCount = 0;
-            //isDestroying = true;
-            while (isDestroying) {
-
-                try {
-                    sleep(40);
-                } catch (InterruptedException e) {
-                    e.printStackTrace();
-                }
-
-                tryCount++;
-
-                log("destroy try: " + tryCount);
-
-                if (tryCount >= maxTry) {
-                    hasDestroyingErrors = true;
-                    log("WARNING: waited for max time to properly destroy JNI.");
-                    break;
-                }
-            }
-            log(String.format("after destroying. TotalSamplesRead = %d", totalSamplesRead));
-
-            if (totalSamplesRead==0)
-            {
-                hasDestroyingErrors = true;
-            }
-
-            endTest();
-        }
-    }
-
-    public void setMessageHandler(Handler messageHandler) {
-        mMessageHandler = messageHandler;
-    }
-
-    private void runDestroy(final long sles_data ) {
-        isDestroying = true;
-
-        //start thread
-
-        final long local_sles_data = sles_data;
-        ////
-        Thread thread = new Thread(new Runnable() {
-            public void run() {
-                isDestroying = true;
-                log("**Start runnable destroy");
-
-                int status = slesDestroy(local_sles_data);
-                log(String.format("**End runnable destroy sles delete status: %d", status));
-                isDestroying = false;
-            }
-        });
-
-        thread.start();
-
-
-
-        log("end of runDestroy()");
-
-
-    }
-
-    public void togglePlay() {
-
-    }
-
-    public void runTest() {
-
-
-    }
-
-   public void endTest() {
-       log("--Ending capture test--");
-       isPlaying = false;
-
-
-       if (mMessageHandler != null) {
-           Message msg = Message.obtain();
-           if (hasDestroyingErrors)
-               msg.what = NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE_ERRORS;
-           else
-               msg.what = NATIVE_AUDIO_THREAD_MESSAGE_REC_COMPLETE;
-           mMessageHandler.sendMessage(msg);
-       }
-
-   }
-
-    public void finish() {
-
-        if (isRunning) {
-            isRunning = false;
-            try {
-                sleep(20);
-            } catch (InterruptedException e) {
-                e.printStackTrace();
-            }
-        }
-    }
-
-    private static void log(String msg) {
-        Log.v("Loopback", msg);
-    }
-
-    double [] getWaveData () {
-        return mvSamples;
-    }
-
-}  //end thread.