Merge "Implemented typed, thread-specific logging system. Currently supported types are:  * Strings  * Integers  * Floats  * Timestamps  * Process IDs"
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index ece64fd..60effe2 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -103,7 +103,7 @@
 
 // establish binder interface to camera service
 template <typename TCam, typename TCamTraits>
-const sp<::android::hardware::ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
+const sp<::android::hardware::ICameraService> CameraBase<TCam, TCamTraits>::getCameraService()
 {
     Mutex::Autolock _l(gLock);
     if (gCameraService.get() == 0) {
@@ -141,7 +141,7 @@
     ALOGV("%s: connect", __FUNCTION__);
     sp<TCam> c = new TCam(cameraId);
     sp<TCamCallbacks> cl = c;
-    const sp<::android::hardware::ICameraService>& cs = getCameraService();
+    const sp<::android::hardware::ICameraService> cs = getCameraService();
 
     binder::Status ret;
     if (cs != nullptr) {
@@ -249,7 +249,7 @@
 template <typename TCam, typename TCamTraits>
 status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId,
         struct hardware::CameraInfo* cameraInfo) {
-    const sp<::android::hardware::ICameraService>& cs = getCameraService();
+    const sp<::android::hardware::ICameraService> cs = getCameraService();
     if (cs == 0) return UNKNOWN_ERROR;
     binder::Status res = cs->getCameraInfo(cameraId, cameraInfo);
     return res.isOk() ? OK : res.serviceSpecificErrorCode();
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 96ecfa0..3512730 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -122,5 +122,5 @@
 
     void prepare2(int maxCount, int streamId);
 
-    void setDeferredConfiguration(int streamId, in OutputConfiguration outputConfiguration);
+    void finalizeOutputConfigurations(int streamId, in OutputConfiguration outputConfiguration);
 }
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 12d0da8..f570b7f 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -21,8 +21,9 @@
 #include <utils/Log.h>
 
 #include <camera/camera2/OutputConfiguration.h>
-#include <gui/Surface.h>
 #include <binder/Parcel.h>
+#include <gui/Surface.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -30,8 +31,9 @@
 const int OutputConfiguration::INVALID_ROTATION = -1;
 const int OutputConfiguration::INVALID_SET_ID = -1;
 
-sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
-    return mGbp;
+const std::vector<sp<IGraphicBufferProducer>>&
+        OutputConfiguration::getGraphicBufferProducers() const {
+    return mGbps;
 }
 
 int OutputConfiguration::getRotation() const {
@@ -54,12 +56,22 @@
     return mHeight;
 }
 
+bool OutputConfiguration::isDeferred() const {
+    return mIsDeferred;
+}
+
+bool OutputConfiguration::isShared() const {
+    return mIsShared;
+}
+
 OutputConfiguration::OutputConfiguration() :
         mRotation(INVALID_ROTATION),
         mSurfaceSetID(INVALID_SET_ID),
         mSurfaceType(SURFACE_TYPE_UNKNOWN),
         mWidth(0),
-        mHeight(0) {
+        mHeight(0),
+        mIsDeferred(false),
+        mIsShared(false) {
 }
 
 OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
@@ -103,39 +115,57 @@
         return err;
     }
 
-    view::Surface surfaceShim;
-    if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
-        // Read surface failure for deferred surface configuration is expected.
-        if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
-                surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
-            ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
-                    __FUNCTION__, width, height);
-            err = OK;
-        } else {
-            ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
-            return err;
-        }
+    int isDeferred = 0;
+    if ((err = parcel->readInt32(&isDeferred)) != OK) {
+        ALOGE("%s: Failed to read surface isDeferred flag from parcel", __FUNCTION__);
+        return err;
     }
 
-    mGbp = surfaceShim.graphicBufferProducer;
+    int isShared = 0;
+    if ((err = parcel->readInt32(&isShared)) != OK) {
+        ALOGE("%s: Failed to read surface isShared flag from parcel", __FUNCTION__);
+        return err;
+    }
+
+    if (isDeferred && surfaceType != SURFACE_TYPE_SURFACE_VIEW &&
+            surfaceType != SURFACE_TYPE_SURFACE_TEXTURE) {
+        ALOGE("%s: Invalid surface type for deferred configuration", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    std::vector<view::Surface> surfaceShims;
+    if ((err = parcel->readParcelableVector(&surfaceShims)) != OK) {
+        ALOGE("%s: Failed to read surface(s) from parcel", __FUNCTION__);
+        return err;
+    }
+
     mRotation = rotation;
     mSurfaceSetID = setID;
     mSurfaceType = surfaceType;
     mWidth = width;
     mHeight = height;
+    mIsDeferred = isDeferred != 0;
+    mIsShared = isShared != 0;
+    for (auto& surface : surfaceShims) {
+        ALOGV("%s: OutputConfiguration: %p, name %s", __FUNCTION__,
+                surface.graphicBufferProducer.get(),
+                String8(surface.name).string());
+        mGbps.push_back(surface.graphicBufferProducer);
+    }
 
-    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
-            "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
-            mRotation, mSurfaceSetID, mSurfaceType);
+    ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
+            __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
 
     return err;
 }
 
 OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
         int surfaceSetID) {
-    mGbp = gbp;
+    mGbps.push_back(gbp);
     mRotation = rotation;
     mSurfaceSetID = surfaceSetID;
+    mIsDeferred = false;
+    mIsShared = false;
 }
 
 status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
@@ -158,14 +188,56 @@
     err = parcel->writeInt32(mHeight);
     if (err != OK) return err;
 
-    view::Surface surfaceShim;
-    surfaceShim.name = String16("unknown_name"); // name of surface
-    surfaceShim.graphicBufferProducer = mGbp;
+    err = parcel->writeInt32(mIsDeferred ? 1 : 0);
+    if (err != OK) return err;
 
-    err = surfaceShim.writeToParcel(parcel);
+    err = parcel->writeInt32(mIsShared ? 1 : 0);
+    if (err != OK) return err;
+
+    std::vector<view::Surface> surfaceShims;
+    for (auto& gbp : mGbps) {
+        view::Surface surfaceShim;
+        surfaceShim.name = String16("unknown_name"); // name of surface
+        surfaceShim.graphicBufferProducer = gbp;
+        surfaceShims.push_back(surfaceShim);
+    }
+    err = parcel->writeParcelableVector(surfaceShims);
     if (err != OK) return err;
 
     return OK;
 }
 
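+// Returns true when both configurations hold the same producers in the same order.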
+bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() != otherGbps.size()) {
+        return false;
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
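+// Ordering helper over the producer lists: shorter vector first, then element-wise comparison.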
+bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() != otherGbps.size()) {
+        return mGbps.size() < otherGbps.size();
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return mGbps[i] < otherGbps[i];
+        }
+    }
+
+    return false;
+}
 }; // namespace android
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index ab99e38..c0da592 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -278,6 +278,7 @@
         case ACAMERA_CONTROL_SCENE_MODE:
         case ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE:
         case ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST:
+        case ACAMERA_CONTROL_ENABLE_ZSL:
         case ACAMERA_EDGE_MODE:
         case ACAMERA_FLASH_MODE:
         case ACAMERA_HOT_PIXEL_MODE:
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
index 3f0e663..14740e6 100644
--- a/drm/libmediadrm/Android.mk
+++ b/drm/libmediadrm/Android.mk
@@ -7,14 +7,21 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    Crypto.cpp \
-    Drm.cpp \
     DrmSessionManager.cpp \
     ICrypto.cpp \
     IDrm.cpp \
     IDrmClient.cpp \
     IMediaDrmService.cpp \
     SharedLibrary.cpp
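+# With ENABLE_TREBLE, build the HIDL-based DrmHal/CryptoHal wrappers instead of
+# the legacy in-process Drm/Crypto implementations.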
+ifeq ($(ENABLE_TREBLE), true)
+LOCAL_SRC_FILES += \
+    DrmHal.cpp \
+    CryptoHal.cpp
+else
+LOCAL_SRC_FILES += \
+    Drm.cpp \
+    Crypto.cpp
+endif
 
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
@@ -24,6 +31,13 @@
     libmediautils \
     libstagefright_foundation \
     libutils
+ifeq ($(ENABLE_TREBLE), true)
+LOCAL_SHARED_LIBRARIES += \
+    android.hidl.base@1.0 \
+    android.hardware.drm@1.0 \
+    libhidlbase \
+    libhidlmemory
+endif
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
 
diff --git a/drm/libmediadrm/Crypto.cpp b/drm/libmediadrm/Crypto.cpp
index 79633cb..d93dad6 100644
--- a/drm/libmediadrm/Crypto.cpp
+++ b/drm/libmediadrm/Crypto.cpp
@@ -233,16 +233,12 @@
     return mPlugin->requiresSecureDecoderComponent(mime);
 }
 
-ssize_t Crypto::decrypt(
-        DestinationType dstType,
-        const uint8_t key[16],
-        const uint8_t iv[16],
-        CryptoPlugin::Mode mode,
-        const CryptoPlugin::Pattern &pattern,
-        const sp<IMemory> &sharedBuffer, size_t offset,
+ssize_t Crypto::decrypt(const uint8_t key[16], const uint8_t iv[16],
+        CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+        const sp<IMemory> &source, size_t offset,
         const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-        void *dstPtr,
-        AString *errorDetailMsg) {
+        const ICrypto::DestinationBuffer &destination, AString *errorDetailMsg) {
+
     Mutex::Autolock autoLock(mLock);
 
     if (mInitCheck != OK) {
@@ -253,12 +249,21 @@
         return -EINVAL;
     }
 
-    const void *srcPtr = static_cast<uint8_t *>(sharedBuffer->pointer()) + offset;
+    const void *srcPtr = static_cast<uint8_t *>(source->pointer()) + offset;
 
-    return mPlugin->decrypt(
-            dstType != kDestinationTypeVmPointer,
-            key, iv, mode, pattern, srcPtr, subSamples, numSubSamples, dstPtr,
-            errorDetailMsg);
+    void *destPtr;
+    bool secure = false;
+    if (destination.mType == kDestinationTypeNativeHandle) {
+        destPtr = static_cast<void *>(destination.mHandle);
+        secure = true;
+    } else {
+        destPtr = destination.mSharedMemory->pointer();
+    }
+
+    ssize_t result = mPlugin->decrypt(secure, key, iv, mode, pattern, srcPtr, subSamples,
+            numSubSamples, destPtr, errorDetailMsg);
+
+    return result;
 }
 
 void Crypto::notifyResolution(uint32_t width, uint32_t height) {
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
new file mode 100644
index 0000000..f1f3b01
--- /dev/null
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CryptoHal"
+#include <utils/Log.h>
+#include <dirent.h>
+#include <dlfcn.h>
+
+#include <android/hardware/drm/1.0/types.h>
+
+#include <binder/IMemory.h>
+#include <cutils/native_handle.h>
+#include <media/CryptoHal.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+using ::android::hardware::drm::V1_0::BufferType;
+using ::android::hardware::drm::V1_0::DestinationBuffer;
+using ::android::hardware::drm::V1_0::ICryptoFactory;
+using ::android::hardware::drm::V1_0::ICryptoPlugin;
+using ::android::hardware::drm::V1_0::Mode;
+using ::android::hardware::drm::V1_0::Pattern;
+using ::android::hardware::drm::V1_0::SharedBuffer;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+
+namespace android {
+
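+// Map a HIDL drm@1.0 Status code onto the framework's status_t error space.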
+static status_t toStatusT(Status status) {
+    switch (status) {
+    case Status::OK:
+        return OK;
+    case Status::ERROR_DRM_NO_LICENSE:
+        return ERROR_DRM_NO_LICENSE;
+    case Status::ERROR_DRM_LICENSE_EXPIRED:
+        return ERROR_DRM_LICENSE_EXPIRED;
+    case Status::ERROR_DRM_RESOURCE_BUSY:
+        return ERROR_DRM_RESOURCE_BUSY;
+    case Status::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
+        return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
+    case Status::ERROR_DRM_SESSION_NOT_OPENED:
+        return ERROR_DRM_SESSION_NOT_OPENED;
+    case Status::ERROR_DRM_CANNOT_HANDLE:
+        return ERROR_DRM_CANNOT_HANDLE;
+    default:
+        return UNKNOWN_ERROR;
+    }
+}
+
+
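+// Conversion helpers between framework types (Vector, raw buffers) and HIDL types.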
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
+    hidl_vec<uint8_t> vec;
+    vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
+    return vec;
+}
+
+static hidl_vec<uint8_t> toHidlVec(const void *ptr, size_t size) {
+    hidl_vec<uint8_t> vec;
+    vec.resize(size);
+    memcpy(vec.data(), ptr, size);
+    return vec;
+}
+
+static hidl_array<uint8_t, 16> toHidlArray16(const uint8_t *ptr) {
+    if (!ptr) {
+        return hidl_array<uint8_t, 16>();
+    }
+    return hidl_array<uint8_t, 16>(ptr);
+}
+
+
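+// Describe an IMemory buffer as an offset/size pair within the shared memory heap.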
+static ::SharedBuffer toSharedBuffer(const sp<IMemory>& sharedBuffer) {
+    ssize_t offset;
+    size_t size;
+    sharedBuffer->getMemory(&offset, &size);
+
+    ::SharedBuffer buffer;
+    buffer.offset = offset >= 0 ? offset : 0;
+    buffer.size = size;
+    return buffer;
+}
+
+static String8 toString8(hidl_string hString) {
+    return String8(hString.c_str());
+}
+
+
+CryptoHal::CryptoHal()
+    : mFactory(makeCryptoFactory()),
+      mInitCheck((mFactory == NULL) ? ERROR_UNSUPPORTED : NO_INIT),
+      mHeapBase(NULL) {
+}
+
+CryptoHal::~CryptoHal() {
+}
+
+
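+// Obtain the "crypto" ICryptoFactory HAL service; returns NULL if it is unavailable.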
+sp<ICryptoFactory> CryptoHal::makeCryptoFactory() {
+    sp<ICryptoFactory> factory = ICryptoFactory::getService("crypto");
+    if (factory == NULL) {
+        ALOGE("Failed to make crypto factory");
+    }
+    return factory;
+}
+
+sp<ICryptoPlugin> CryptoHal::makeCryptoPlugin(const uint8_t uuid[16],
+        const void *initData, size_t initDataSize) {
+    if (mFactory == NULL){
+        return NULL;
+    }
+
+    sp<ICryptoPlugin> plugin;
+    Return<void> hResult = mFactory->createPlugin(toHidlArray16(uuid),
+            toHidlVec(initData, initDataSize),
+            [&](Status status, const sp<ICryptoPlugin>& hPlugin) {
+                if (status != Status::OK) {
+                    ALOGE("Failed to make crypto plugin");
+                    return;
+                }
+                plugin = hPlugin;
+            }
+        );
+    return plugin;
+}
+
+
+status_t CryptoHal::initCheck() const {
+    return mInitCheck;
+}
+
+
+bool CryptoHal::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+    Mutex::Autolock autoLock(mLock);
+    if (mFactory != NULL) {
+        return mFactory->isCryptoSchemeSupported(uuid);
+    }
+    return false;
+}
+
+status_t CryptoHal::createPlugin(
+        const uint8_t uuid[16], const void *data, size_t size) {
+    Mutex::Autolock autoLock(mLock);
+
+    mPlugin = makeCryptoPlugin(uuid, data, size);
+
+    if (mPlugin == NULL) {
+        mInitCheck = ERROR_UNSUPPORTED;
+    } else {
+        mInitCheck = OK;
+    }
+
+    return mInitCheck;
+}
+
+status_t CryptoHal::destroyPlugin() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    mPlugin.clear();
+    return OK;
+}
+
+bool CryptoHal::requiresSecureDecoderComponent(const char *mime) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    return mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+}
+
+
+/**
+ * If the heap base isn't set, get the heap base from the IMemory
+ * and send it to the HAL so it can map a remote heap of the same
+ * size.  Once the heap base is established, shared memory buffers
+ * are sent by providing an offset into the heap and a buffer size.
+ */
+status_t CryptoHal::setHeapBase(const sp<IMemory>& sharedBuffer) {
+    sp<IMemoryHeap> heap = sharedBuffer->getMemory(NULL, NULL);
+    if (mHeapBase != heap->getBase()) {
+        int fd = heap->getHeapID();
+        native_handle_t* nativeHandle = native_handle_create(1, 0);
+        nativeHandle->data[0] = fd;
+        auto hidlHandle = hidl_handle(nativeHandle);
+        auto hidlMemory = hidl_memory("ashmem", hidlHandle, heap->getSize());
+        mHeapBase = heap->getBase();
+        Return<void> hResult = mPlugin->setSharedBufferBase(hidlMemory);
+        if (!hResult.isOk()) {
+            return DEAD_OBJECT;
+        }
+    }
+    return OK;
+}
+
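+// Forward a decrypt request to the HAL plugin: convert the mode, pattern and
+// subsamples to HIDL types, select a secure or non-secure destination buffer,
+// and return the number of bytes written on success or a status_t error.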
+ssize_t CryptoHal::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
+        CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+        const sp<IMemory> &source, size_t offset,
+        const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+        const ICrypto::DestinationBuffer &destination, AString *errorDetailMsg) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    // Establish the base of the shared memory heap
+    setHeapBase(source);
+
+    Mode hMode;
+    switch(mode) {
+    case CryptoPlugin::kMode_Unencrypted:
+        hMode = Mode::UNENCRYPTED;
+        break;
+    case CryptoPlugin::kMode_AES_CTR:
+        hMode = Mode::AES_CTR;
+        break;
+    case CryptoPlugin::kMode_AES_WV:
+        hMode = Mode::AES_CBC_CTS;
+        break;
+    case CryptoPlugin::kMode_AES_CBC:
+        hMode = Mode::AES_CBC;
+        break;
+    default:
+        return UNKNOWN_ERROR;
+    }
+
+    Pattern hPattern;
+    hPattern.encryptBlocks = pattern.mEncryptBlocks;
+    hPattern.skipBlocks = pattern.mSkipBlocks;
+
+    std::vector<SubSample> stdSubSamples;
+    for (size_t i = 0; i < numSubSamples; i++) {
+        SubSample subSample;
+        subSample.numBytesOfClearData = subSamples[i].mNumBytesOfClearData;
+        subSample.numBytesOfEncryptedData = subSamples[i].mNumBytesOfEncryptedData;
+        stdSubSamples.push_back(subSample);
+    }
+    auto hSubSamples = hidl_vec<SubSample>(stdSubSamples);
+
+    bool secure;
+    ::DestinationBuffer hDestination;
+    if (destination.mType == kDestinationTypeSharedMemory) {
+        hDestination.type = BufferType::SHARED_MEMORY;
+        hDestination.nonsecureMemory = toSharedBuffer(destination.mSharedMemory);
+        secure = false;
+    } else {
+        hDestination.type = BufferType::NATIVE_HANDLE;
+        hDestination.secureMemory = hidl_handle(destination.mHandle);
+        secure = true;
+    }
+
+
+    status_t err = UNKNOWN_ERROR;
+    uint32_t bytesWritten = 0;
+
+    Return<void> hResult = mPlugin->decrypt(secure, toHidlArray16(keyId), toHidlArray16(iv), hMode,
+            hPattern, hSubSamples, toSharedBuffer(source), offset, hDestination,
+            [&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
+                if (status == Status::OK) {
+                    bytesWritten = hBytesWritten;
+                    *errorDetailMsg = toString8(hDetailedError);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    if (!hResult.isOk()) {
+        err = DEAD_OBJECT;
+    }
+
+    if (err == OK) {
+        return bytesWritten;
+    }
+    return err;
+}
+
+void CryptoHal::notifyResolution(uint32_t width, uint32_t height) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    mPlugin->notifyResolution(width, height);
+}
+
+status_t CryptoHal::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    return toStatusT(mPlugin->setMediaDrmSession(toHidlVec(sessionId)));
+}
+
+}  // namespace android
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
index 07e9414..e3176e3 100644
--- a/drm/libmediadrm/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -303,7 +303,8 @@
     return true;
 }
 
-status_t Drm::createPlugin(const uint8_t uuid[16]) {
+status_t Drm::createPlugin(const uint8_t uuid[16],
+                           const String8& /* appPackageName */) {
     Mutex::Autolock autoLock(mLock);
 
     if (mPlugin != NULL) {
@@ -319,7 +320,12 @@
     }
 
     status_t result = mFactory->createDrmPlugin(uuid, &mPlugin);
-    mPlugin->setListener(this);
+    if (mPlugin) {
+        mPlugin->setListener(this);
+    } else {
+        ALOGE("Failed to create plugin");
+        return UNEXPECTED_NULL;
+    }
     return result;
 }
 
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
new file mode 100644
index 0000000..42dce35
--- /dev/null
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -0,0 +1,942 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmHal"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <dirent.h>
+#include <dlfcn.h>
+
+#include <android/hardware/drm/1.0/IDrmFactory.h>
+#include <android/hardware/drm/1.0/IDrmPlugin.h>
+#include <android/hardware/drm/1.0/types.h>
+
+#include <media/DrmHal.h>
+#include <media/DrmSessionClientInterface.h>
+#include <media/DrmSessionManager.h>
+#include <media/drm/DrmAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::IDrmFactory;
+using ::android::hardware::drm::V1_0::IDrmPlugin;
+using ::android::hardware::drm::V1_0::KeyedVector;
+using ::android::hardware::drm::V1_0::KeyRequestType;
+using ::android::hardware::drm::V1_0::KeyStatus;
+using ::android::hardware::drm::V1_0::KeyStatusType;
+using ::android::hardware::drm::V1_0::KeyType;
+using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_0::SecureStop;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+namespace android {
+
+static inline int getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+static bool checkPermission(const char* permissionString) {
+    if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
+    bool ok = checkCallingPermission(String16(permissionString));
+    if (!ok) ALOGE("Request requires %s", permissionString);
+    return ok;
+}
+
+static const Vector<uint8_t> toVector(const hidl_vec<uint8_t> &vec) {
+    Vector<uint8_t> vector;
+    vector.appendArray(vec.data(), vec.size());
+    return *const_cast<const Vector<uint8_t> *>(&vector);
+}
+
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
+    hidl_vec<uint8_t> vec;
+    vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
+    return vec;
+}
+
+static String8 toString8(const hidl_string &string) {
+    return String8(string.c_str());
+}
+
+static hidl_string toHidlString(const String8& string) {
+    return hidl_string(string.string());
+}
+
+
+static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
+        keyedVector) {
+    std::vector<KeyValue> stdKeyedVector;
+    for (size_t i = 0; i < keyedVector.size(); i++) {
+        KeyValue keyValue;
+        keyValue.key = toHidlString(keyedVector.keyAt(i));
+        keyValue.value = toHidlString(keyedVector.valueAt(i));
+        stdKeyedVector.push_back(keyValue);
+    }
+    return ::KeyedVector(stdKeyedVector);
+}
+
+static KeyedVector<String8, String8> toKeyedVector(const ::KeyedVector&
+        hKeyedVector) {
+    KeyedVector<String8, String8> keyedVector;
+    for (size_t i = 0; i < hKeyedVector.size(); i++) {
+        keyedVector.add(toString8(hKeyedVector[i].key),
+                toString8(hKeyedVector[i].value));
+    }
+    return keyedVector;
+}
+
+static List<Vector<uint8_t> > toSecureStops(const hidl_vec<SecureStop>&
+        hSecureStops) {
+    List<Vector<uint8_t> > secureStops;
+    for (size_t i = 0; i < hSecureStops.size(); i++) {
+        secureStops.push_back(toVector(hSecureStops[i].opaqueData));
+    }
+    return secureStops;
+}
+
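+// Map a HIDL drm@1.0 Status code onto the framework's status_t error space.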
+static status_t toStatusT(Status status) {
+    switch (status) {
+    case Status::OK:
+        return OK;
+        break;
+    case Status::ERROR_DRM_NO_LICENSE:
+        return ERROR_DRM_NO_LICENSE;
+        break;
+    case Status::ERROR_DRM_LICENSE_EXPIRED:
+        return ERROR_DRM_LICENSE_EXPIRED;
+        break;
+    case Status::ERROR_DRM_SESSION_NOT_OPENED:
+        return ERROR_DRM_SESSION_NOT_OPENED;
+        break;
+    case Status::ERROR_DRM_CANNOT_HANDLE:
+        return ERROR_DRM_CANNOT_HANDLE;
+        break;
+    case Status::ERROR_DRM_INVALID_STATE:
+        return ERROR_DRM_TAMPER_DETECTED;
+        break;
+    case Status::BAD_VALUE:
+        return BAD_VALUE;
+        break;
+    case Status::ERROR_DRM_NOT_PROVISIONED:
+        return ERROR_DRM_NOT_PROVISIONED;
+        break;
+    case Status::ERROR_DRM_RESOURCE_BUSY:
+        return ERROR_DRM_RESOURCE_BUSY;
+        break;
+    case Status::ERROR_DRM_DEVICE_REVOKED:
+        return ERROR_DRM_DEVICE_REVOKED;
+        break;
+    case Status::ERROR_DRM_UNKNOWN:
+    default:
+        return ERROR_DRM_UNKNOWN;
+        break;
+    }
+}
+
+
+Mutex DrmHal::mLock;
+
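+// Allows DrmSessionManager to reclaim this instance's sessions when resources are needed.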
+struct DrmSessionClient : public DrmSessionClientInterface {
+    explicit DrmSessionClient(DrmHal* drm) : mDrm(drm) {}
+
+    virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
+        sp<DrmHal> drm = mDrm.promote();
+        if (drm == NULL) {
+            return true;
+        }
+        status_t err = drm->closeSession(sessionId);
+        if (err != OK) {
+            return false;
+        }
+        drm->sendEvent(EventType::SESSION_RECLAIMED,
+                toHidlVec(sessionId), hidl_vec<uint8_t>());
+        return true;
+    }
+
+protected:
+    virtual ~DrmSessionClient() {}
+
+private:
+    wp<DrmHal> mDrm;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
+};
+
+DrmHal::DrmHal()
+   : mDrmSessionClient(new DrmSessionClient(this)),
+     mFactory(makeDrmFactory()),
+     mInitCheck((mFactory == NULL) ? ERROR_UNSUPPORTED : NO_INIT) {
+}
+
+DrmHal::~DrmHal() {
+    DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
+}
+
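+// Obtain the "drm" IDrmFactory HAL service; returns NULL if it is unavailable.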
+sp<IDrmFactory> DrmHal::makeDrmFactory() {
+    sp<IDrmFactory> factory = IDrmFactory::getService("drm");
+    if (factory == NULL) {
+        ALOGE("Failed to make drm factory");
+    }
+    return factory;
+}
+
+sp<IDrmPlugin> DrmHal::makeDrmPlugin(const uint8_t uuid[16],
+        const String8& appPackageName) {
+    if (mFactory == NULL){
+        return NULL;
+    }
+
+    sp<IDrmPlugin> plugin;
+    Return<void> hResult = mFactory->createPlugin(uuid, appPackageName.string(),
+            [&](Status status, const sp<IDrmPlugin>& hPlugin) {
+                if (status != Status::OK) {
+                    ALOGD("Failed to make drm plugin");
+                    return;
+                }
+                plugin = hPlugin;
+            }
+        );
+    return plugin;
+}
+
+status_t DrmHal::initCheck() const {
+    return mInitCheck;
+}
+
+status_t DrmHal::setListener(const sp<IDrmClient>& listener)
+{
+    Mutex::Autolock lock(mEventLock);
+    if (mListener != NULL){
+        IInterface::asBinder(mListener)->unlinkToDeath(this);
+    }
+    if (listener != NULL) {
+        IInterface::asBinder(listener)->linkToDeath(this);
+    }
+    mListener = listener;
+    return NO_ERROR;
+}
+
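+// HIDL listener callbacks: repack the event payload into a Parcel and forward
+// it to the registered IDrmClient listener.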
+Return<void> DrmHal::sendEvent(EventType hEventType,
+        const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data) {
+
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+        writeByteArray(obj, data);
+
+        Mutex::Autolock lock(mNotifyLock);
+        DrmPlugin::EventType eventType;
+        switch(hEventType) {
+        case EventType::PROVISION_REQUIRED:
+            eventType = DrmPlugin::kDrmPluginEventProvisionRequired;
+            break;
+        case EventType::KEY_NEEDED:
+            eventType = DrmPlugin::kDrmPluginEventKeyNeeded;
+            break;
+        case EventType::KEY_EXPIRED:
+            eventType = DrmPlugin::kDrmPluginEventKeyExpired;
+            break;
+        case EventType::VENDOR_DEFINED:
+            eventType = DrmPlugin::kDrmPluginEventVendorDefined;
+            break;
+        default:
+            return Void();
+        }
+        listener->notify(eventType, 0, &obj);
+    }
+    return Void();
+}
+
+Return<void> DrmHal::sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+        int64_t expiryTimeInMS) {
+
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+        obj.writeInt64(expiryTimeInMS);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventExpirationUpdate, 0, &obj);
+    }
+    return Void();
+}
+
+Return<void> DrmHal::sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+        const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey) {
+
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+
+        size_t nKeys = keyStatusList.size();
+        obj.writeInt32(nKeys);
+        for (size_t i = 0; i < nKeys; ++i) {
+            const KeyStatus &keyStatus = keyStatusList[i];
+            writeByteArray(obj, keyStatus.keyId);
+            uint32_t type;
+            switch(keyStatus.type) {
+            case KeyStatusType::USABLE:
+                type = DrmPlugin::kKeyStatusType_Usable;
+                break;
+            case KeyStatusType::EXPIRED:
+                type = DrmPlugin::kKeyStatusType_Expired;
+                break;
+            case KeyStatusType::OUTPUTNOTALLOWED:
+                type = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+                break;
+            case KeyStatusType::STATUSPENDING:
+                type = DrmPlugin::kKeyStatusType_StatusPending;
+                break;
+            case KeyStatusType::INTERNALERROR:
+            default:
+                type = DrmPlugin::kKeyStatusType_InternalError;
+                break;
+            }
+            obj.writeInt32(type);
+        }
+        obj.writeInt32(hasNewUsableKey);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
+    }
+    return Void();
+}
+
+bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+    Mutex::Autolock autoLock(mLock);
+    bool result = false;
+
+    if (mFactory != NULL && mFactory->isCryptoSchemeSupported(uuid)) {
+        if (mimeType != "") {
+            result = mFactory->isContentTypeSupported(mimeType.string());
+        }
+    }
+    return result;
+}
+
+status_t DrmHal::createPlugin(const uint8_t uuid[16],
+        const String8& appPackageName) {
+    Mutex::Autolock autoLock(mLock);
+
+    mPlugin = makeDrmPlugin(uuid, appPackageName);
+
+    if (mPlugin == NULL) {
+        mInitCheck = ERROR_UNSUPPORTED;
+    } else {
+        mInitCheck = OK;
+        mPlugin->setListener(this);
+    }
+
+    return mInitCheck;
+}
+
+status_t DrmHal::destroyPlugin() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    setListener(NULL);
+    mPlugin.clear();
+
+    return OK;
+}
+
+status_t DrmHal::openSession(Vector<uint8_t> &sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t  err = UNKNOWN_ERROR;
+
+    bool retry = true;
+    do {
+        hidl_vec<uint8_t> hSessionId;
+
+        Return<void> hResult = mPlugin->openSession(
+                [&](Status status, const hidl_vec<uint8_t>& id) {
+                    if (status == Status::OK) {
+                        sessionId = toVector(id);
+                    }
+                    err = toStatusT(status);
+                }
+            );
+
+        if (!hResult.isOk()) {
+            err = DEAD_OBJECT;
+        }
+
+        if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
+            mLock.unlock();
+            // reclaimSession may call back into closeSession. Since mLock is
+            // shared between Drm instances, unlock here to avoid deadlock.
+            retry = DrmSessionManager::Instance()->reclaimSession(getCallingPid());
+            mLock.lock();
+        } else {
+            retry = false;
+        }
+    } while (retry);
+
+    if (err == OK) {
+        DrmSessionManager::Instance()->addSession(getCallingPid(),
+                mDrmSessionClient, sessionId);
+    }
+    return err;
+}
+
+status_t DrmHal::closeSession(Vector<uint8_t> const &sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    Status status = mPlugin->closeSession(toHidlVec(sessionId));
+    if (status == Status::OK) {
+        DrmSessionManager::Instance()->removeSession(sessionId);
+    }
+    return toStatusT(status);
+}
+
+status_t DrmHal::getKeyRequest(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &initData, String8 const &mimeType,
+        DrmPlugin::KeyType keyType, KeyedVector<String8,
+        String8> const &optionalParameters, Vector<uint8_t> &request,
+        String8 &defaultUrl, DrmPlugin::KeyRequestType *keyRequestType) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    KeyType hKeyType;
+    if (keyType == DrmPlugin::kKeyType_Streaming) {
+        hKeyType = KeyType::STREAMING;
+    } else if (keyType == DrmPlugin::kKeyType_Offline) {
+        hKeyType = KeyType::OFFLINE;
+    } else if (keyType == DrmPlugin::kKeyType_Release) {
+        hKeyType = KeyType::RELEASE;
+    } else {
+        return BAD_VALUE;
+    }
+
+    ::KeyedVector hOptionalParameters = toHidlKeyedVector(optionalParameters);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getKeyRequest(toHidlVec(sessionId),
+            toHidlVec(initData), toHidlString(mimeType), hKeyType, hOptionalParameters,
+            [&](Status status, const hidl_vec<uint8_t>& hRequest,
+                    KeyRequestType hKeyRequestType, const hidl_string& hDefaultUrl) {
+
+                if (status == Status::OK) {
+                    request = toVector(hRequest);
+                    defaultUrl = toString8(hDefaultUrl);
+
+                    switch (hKeyRequestType) {
+                    case KeyRequestType::INITIAL:
+                        *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
+                        break;
+                    case KeyRequestType::RENEWAL:
+                        *keyRequestType = DrmPlugin::kKeyRequestType_Renewal;
+                        break;
+                    case KeyRequestType::RELEASE:
+                        *keyRequestType = DrmPlugin::kKeyRequestType_Release;
+                        break;
+                    default:
+                        *keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
+                        break;
+                    }
+                    err = toStatusT(status);
+                }
+            });
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::provideKeyResponse(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &response, Vector<uint8_t> &keySetId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->provideKeyResponse(toHidlVec(sessionId),
+            toHidlVec(response),
+            [&](Status status, const hidl_vec<uint8_t>& hKeySetId) {
+                if (status == Status::OK) {
+                    keySetId = toVector(hKeySetId);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::removeKeys(Vector<uint8_t> const &keySetId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    return toStatusT(mPlugin->removeKeys(toHidlVec(keySetId)));
+}
+
+status_t DrmHal::restoreKeys(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &keySetId) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    return toStatusT(mPlugin->restoreKeys(toHidlVec(sessionId),
+                    toHidlVec(keySetId)));
+}
+
+status_t DrmHal::queryKeyStatus(Vector<uint8_t> const &sessionId,
+        KeyedVector<String8, String8> &infoMap) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    ::KeyedVector hInfoMap;
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->queryKeyStatus(toHidlVec(sessionId),
+            [&](Status status, const hidl_vec<KeyValue>& map) {
+                if (status == Status::OK) {
+                    infoMap = toKeyedVector(map);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getProvisionRequest(String8 const &certType,
+        String8 const &certAuthority, Vector<uint8_t> &request,
+        String8 &defaultUrl) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getProvisionRequest(
+            toHidlString(certType), toHidlString(certAuthority),
+            [&](Status status, const hidl_vec<uint8_t>& hRequest,
+                    const hidl_string& hDefaultUrl) {
+                if (status == Status::OK) {
+                    request = toVector(hRequest);
+                    defaultUrl = toString8(hDefaultUrl);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const &response,
+        Vector<uint8_t> &certificate, Vector<uint8_t> &wrappedKey) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->provideProvisionResponse(toHidlVec(response),
+            [&](Status status, const hidl_vec<uint8_t>& hCertificate,
+                    const hidl_vec<uint8_t>& hWrappedKey) {
+                if (status == Status::OK) {
+                    certificate = toVector(hCertificate);
+                    wrappedKey = toVector(hWrappedKey);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getSecureStops(List<Vector<uint8_t> > &secureStops) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getSecureStops(
+            [&](Status status, const hidl_vec<SecureStop>& hSecureStops) {
+                if (status == Status::OK) {
+                    secureStops = toSecureStops(hSecureStops);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+
+status_t DrmHal::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getSecureStop(toHidlVec(ssid),
+            [&](Status status, const SecureStop& hSecureStop) {
+                if (status == Status::OK) {
+                    secureStop = toVector(hSecureStop.opaqueData);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    return toStatusT(mPlugin->releaseSecureStop(toHidlVec(ssRelease)));
+}
+
+status_t DrmHal::releaseAllSecureStops() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    return toStatusT(mPlugin->releaseAllSecureStops());
+}
+
+status_t DrmHal::getPropertyString(String8 const &name, String8 &value ) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getPropertyString(toHidlString(name),
+            [&](Status status, const hidl_string& hValue) {
+                if (status == Status::OK) {
+                    value = toString8(hValue);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getPropertyByteArray(String8 const &name, Vector<uint8_t> &value ) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->getPropertyByteArray(toHidlString(name),
+            [&](Status status, const hidl_vec<uint8_t>& hValue) {
+                if (status == Status::OK) {
+                    value = toVector(hValue);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::setPropertyString(String8 const &name, String8 const &value ) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    Status status =  mPlugin->setPropertyString(toHidlString(name),
+            toHidlString(value));
+    return toStatusT(status);
+}
+
+status_t DrmHal::setPropertyByteArray(String8 const &name,
+                                   Vector<uint8_t> const &value ) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    Status status = mPlugin->setPropertyByteArray(toHidlString(name),
+            toHidlVec(value));
+    return toStatusT(status);
+}
+
+
+status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+                                 String8 const &algorithm) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    Status status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
+            toHidlString(algorithm));
+    return toStatusT(status);
+}
+
+status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
+                              String8 const &algorithm) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    Status status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
+            toHidlString(algorithm));
+    return toStatusT(status);
+}
+
+status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+        Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->encrypt(toHidlVec(sessionId),
+            toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
+            [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+                if (status == Status::OK) {
+                    output = toVector(hOutput);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::decrypt(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+        Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t  err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->decrypt(toHidlVec(sessionId),
+            toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
+            [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+                if (status == Status::OK) {
+                    output = toVector(hOutput);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::sign(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+        Vector<uint8_t> &signature) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->sign(toHidlVec(sessionId),
+            toHidlVec(keyId), toHidlVec(message),
+            [&](Status status, const hidl_vec<uint8_t>& hSignature)  {
+                if (status == Status::OK) {
+                    signature = toVector(hSignature);
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::verify(Vector<uint8_t> const &sessionId,
+        Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+        Vector<uint8_t> const &signature, bool &match) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->verify(toHidlVec(sessionId), toHidlVec(keyId),
+            toHidlVec(message), toHidlVec(signature),
+            [&](Status status, bool hMatch) {
+                if (status == Status::OK) {
+                    match = hMatch;
+                } else {
+                    match = false;
+                }
+                err = toStatusT(status);
+            }
+    );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::signRSA(Vector<uint8_t> const &sessionId,
+        String8 const &algorithm, Vector<uint8_t> const &message,
+        Vector<uint8_t> const &wrappedKey, Vector<uint8_t> &signature) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    if (!checkPermission("android.permission.ACCESS_DRM_CERTIFICATES")) {
+        return -EPERM;
+    }
+
+    DrmSessionManager::Instance()->useSession(sessionId);
+
+    status_t err = UNKNOWN_ERROR;
+
+    Return<void> hResult = mPlugin->signRSA(toHidlVec(sessionId),
+            toHidlString(algorithm), toHidlVec(message), toHidlVec(wrappedKey),
+            [&](Status status, const hidl_vec<uint8_t>& hSignature) {
+                if (status == Status::OK) {
+                    signature = toVector(hSignature);
+                }
+                err = toStatusT(status);
+            }
+        );
+
+    return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+void DrmHal::binderDied(const wp<IBinder> &the_late_who __unused)
+{
+    mEventLock.lock();
+    mListener.clear();
+    mEventLock.unlock();
+
+    Mutex::Autolock autoLock(mLock);
+    mPlugin.clear();
+}
+
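+// Write a length-prefixed byte array to the Parcel (length 0 when the vector is empty).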
+void DrmHal::writeByteArray(Parcel &obj, hidl_vec<uint8_t> const &vec)
+{
+    if (vec.size()) {
+        obj.writeInt32(vec.size());
+        obj.write(vec.data(), vec.size());
+    } else {
+        obj.writeInt32(0);
+    }
+}
+
+}  // namespace android
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 8ba80c6..10e6bc3 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -94,18 +94,13 @@
         return reply.readInt32() != 0;
     }
 
-    virtual ssize_t decrypt(
-            DestinationType dstType,
-            const uint8_t key[16],
-            const uint8_t iv[16],
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
             CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
-            const sp<IMemory> &sharedBuffer, size_t offset,
+            const sp<IMemory> &source, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-            void *dstPtr,
-            AString *errorDetailMsg) {
+            const DestinationBuffer &destination, AString *errorDetailMsg) {
         Parcel data, reply;
         data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
-        data.writeInt32((int32_t)dstType);
         data.writeInt32(mode);
         data.writeInt32(pattern.mEncryptBlocks);
         data.writeInt32(pattern.mSkipBlocks);
@@ -130,18 +125,23 @@
         }
 
         data.writeInt32(totalSize);
-        data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
+        data.writeStrongBinder(IInterface::asBinder(source));
         data.writeInt32(offset);
 
         data.writeInt32(numSubSamples);
         data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
 
-        if (dstType == kDestinationTypeNativeHandle) {
-            data.writeNativeHandle(static_cast<native_handle_t *>(dstPtr));
-        } else if (dstType == kDestinationTypeOpaqueHandle) {
-            data.writeInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dstPtr)));
+        data.writeInt32((int32_t)destination.mType);
+        if (destination.mType == kDestinationTypeNativeHandle) {
+            if (destination.mHandle == NULL) {
+                return BAD_VALUE;
+            }
+            data.writeNativeHandle(destination.mHandle);
         } else {
-            dstType = kDestinationTypeVmPointer;
+            if (destination.mSharedMemory == NULL) {
+                return BAD_VALUE;
+            }
+            data.writeStrongBinder(IInterface::asBinder(destination.mSharedMemory));
         }
 
         remote()->transact(DECRYPT, data, &reply);
@@ -150,10 +150,6 @@
 
         if (isCryptoError(result)) {
             errorDetailMsg->setTo(reply.readCString());
-        } else if (dstType == kDestinationTypeVmPointer) {
-            // For the non-secure case, copy the decrypted
-            // data from shared memory to its final destination
-            memcpy(dstPtr, sharedBuffer->pointer(), result);
         }
 
         return result;
@@ -280,7 +276,6 @@
         {
             CHECK_INTERFACE(ICrypto, data, reply);
 
-            DestinationType dstType = (DestinationType)data.readInt32();
             CryptoPlugin::Mode mode = (CryptoPlugin::Mode)data.readInt32();
             CryptoPlugin::Pattern pattern;
             pattern.mEncryptBlocks = data.readInt32();
@@ -293,9 +288,9 @@
             data.read(iv, sizeof(iv));
 
             size_t totalSize = data.readInt32();
-            sp<IMemory> sharedBuffer =
+            sp<IMemory> source =
                 interface_cast<IMemory>(data.readStrongBinder());
-            if (sharedBuffer == NULL) {
+            if (source == NULL) {
                 reply->writeInt32(BAD_VALUE);
                 return OK;
             }
@@ -308,23 +303,26 @@
             }
 
             CryptoPlugin::SubSample *subSamples =
-                new CryptoPlugin::SubSample[numSubSamples];
+                    new CryptoPlugin::SubSample[numSubSamples];
 
-            data.read(
-                    subSamples,
+            data.read(subSamples,
                     sizeof(CryptoPlugin::SubSample) * numSubSamples);
 
-            native_handle_t *nativeHandle = NULL;
-            void *secureBufferId = NULL, *dstPtr;
-            if (dstType == kDestinationTypeNativeHandle) {
-                nativeHandle = data.readNativeHandle();
-                dstPtr = static_cast<void *>(nativeHandle);
-            } else if (dstType == kDestinationTypeOpaqueHandle) {
-                secureBufferId = reinterpret_cast<void *>(static_cast<uintptr_t>(data.readInt64()));
-                dstPtr = secureBufferId;
-            } else {
-                dstType = kDestinationTypeVmPointer;
-                dstPtr = malloc(totalSize);
+            DestinationBuffer destination;
+            destination.mType = (DestinationType)data.readInt32();
+            if (destination.mType == kDestinationTypeNativeHandle) {
+                destination.mHandle = data.readNativeHandle();
+                if (destination.mHandle == NULL) {
+                    reply->writeInt32(BAD_VALUE);
+                    return OK;
+                }
+            } else if (destination.mType == kDestinationTypeSharedMemory) {
+                destination.mSharedMemory =
+                        interface_cast<IMemory>(data.readStrongBinder());
+                if (destination.mSharedMemory == NULL) {
+                    reply->writeInt32(BAD_VALUE);
+                    return OK;
+                }
             }
 
             AString errorDetailMsg;
@@ -348,20 +346,13 @@
 
             if (overflow || sumSubsampleSizes != totalSize) {
                 result = -EINVAL;
-            } else if (totalSize > sharedBuffer->size()) {
+            } else if (totalSize > source->size()) {
                 result = -EINVAL;
-            } else if ((size_t)offset > sharedBuffer->size() - totalSize) {
+            } else if ((size_t)offset > source->size() - totalSize) {
                 result = -EINVAL;
             } else {
-                result = decrypt(
-                    dstType,
-                    key,
-                    iv,
-                    mode, pattern,
-                    sharedBuffer, offset,
-                    subSamples, numSubSamples,
-                    dstPtr,
-                    &errorDetailMsg);
+                result = decrypt(key, iv, mode, pattern, source, offset,
+                        subSamples, numSubSamples, destination, &errorDetailMsg);
             }
 
             reply->writeInt32(result);
@@ -370,23 +361,12 @@
                 reply->writeCString(errorDetailMsg.c_str());
             }
 
-            if (dstType == kDestinationTypeVmPointer) {
-                if (result >= 0) {
-                    CHECK_LE(result, static_cast<ssize_t>(totalSize));
-                    // For the non-secure case, pass the decrypted
-                    // data back via the shared buffer rather than
-                    // copying it separately over binder to avoid
-                    // binder's 1MB limit.
-                    memcpy(sharedBuffer->pointer(), dstPtr, result);
-                }
-                free(dstPtr);
-                dstPtr = NULL;
-            } else if (dstType == kDestinationTypeNativeHandle) {
+            if (destination.mType == kDestinationTypeNativeHandle) {
                 int err;
-                if ((err = native_handle_close(nativeHandle)) < 0) {
+                if ((err = native_handle_close(destination.mHandle)) < 0) {
                     ALOGW("secure buffer native_handle_close failed: %d", err);
                 }
-                if ((err = native_handle_delete(nativeHandle)) < 0) {
+                if ((err = native_handle_delete(destination.mHandle)) < 0) {
                     ALOGW("secure buffer native_handle_delete failed: %d", err);
                 }
             }
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index c4558c6..4e47112 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -88,13 +88,15 @@
         return reply.readInt32() != 0;
     }
 
-    virtual status_t createPlugin(const uint8_t uuid[16]) {
+    virtual status_t createPlugin(const uint8_t uuid[16],
+                                  const String8& appPackageName) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.write(uuid, 16);
-
+        data.writeString8(appPackageName);
         status_t status = remote()->transact(CREATE_PLUGIN, data, &reply);
         if (status != OK) {
+            ALOGE("createPlugin: binder call failed: %d", status);
             return status;
         }
 
@@ -585,7 +587,6 @@
             data.read(uuid, sizeof(uuid));
             String8 mimeType = data.readString8();
             reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
-
             return OK;
         }
 
@@ -594,7 +595,8 @@
             CHECK_INTERFACE(IDrm, data, reply);
             uint8_t uuid[16];
             data.read(uuid, sizeof(uuid));
-            reply->writeInt32(createPlugin(uuid));
+            String8 appPackageName = data.readString8();
+            reply->writeInt32(createPlugin(uuid, appPackageName));
             return OK;
         }
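
Caller-side sketch of the widened createPlugin() entry point shown above (the sp<IDrm> handle and package name are illustrative, not part of this change):

    uint8_t uuid[16] = { /* DRM scheme UUID */ };
    status_t err = drm->createPlugin(uuid, String8("com.example.player"));
    if (err != OK) {
        // no plugin could be created for this scheme / package
    }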
 
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
index d27956c..c83321b 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
@@ -44,7 +44,8 @@
 }
 
 android::status_t DrmFactory::createDrmPlugin(
-        const uint8_t uuid[16], android::DrmPlugin** plugin) {
+        const uint8_t uuid[16],
+        android::DrmPlugin** plugin) {
     if (!isCryptoSchemeSupported(uuid)) {
         *plugin = NULL;
         return android::BAD_VALUE;
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.h b/drm/mediadrm/plugins/clearkey/DrmFactory.h
index 87db982..0bc0843 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.h
@@ -35,7 +35,8 @@
     virtual bool isContentTypeSupported(const android::String8 &mimeType);
 
     virtual android::status_t createDrmPlugin(
-            const uint8_t uuid[16], android::DrmPlugin** plugin);
+            const uint8_t uuid[16],
+            android::DrmPlugin** plugin);
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(DrmFactory);
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index a38cca9..c82b9d9 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -56,7 +56,8 @@
         return true;
     }
 
-    status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16], DrmPlugin **plugin)
+    status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16],
+                                             DrmPlugin **plugin)
     {
         *plugin = new MockDrmPlugin();
         return OK;
@@ -729,7 +730,7 @@
 
     ssize_t MockDrmPlugin::findSession(Vector<uint8_t> const &sessionId) const
     {
-        ALOGD("findSession: nsessions=%d, size=%d", mSessions.size(), sessionId.size());
+        ALOGD("findSession: nsessions=%u, size=%u", mSessions.size(), sessionId.size());
         for (size_t i = 0; i < mSessions.size(); ++i) {
             if (memcmp(mSessions[i].array(), sessionId.array(), sessionId.size()) == 0) {
                 return i;
@@ -740,7 +741,7 @@
 
     ssize_t MockDrmPlugin::findKeySet(Vector<uint8_t> const &keySetId) const
     {
-        ALOGD("findKeySet: nkeySets=%d, size=%d", mKeySets.size(), keySetId.size());
+        ALOGD("findKeySet: nkeySets=%u, size=%u", mKeySets.size(), keySetId.size());
         for (size_t i = 0; i < mKeySets.size(); ++i) {
             if (memcmp(mKeySets[i].array(), keySetId.array(), keySetId.size()) == 0) {
                 return i;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index 98bdd69..9f8db17 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -33,7 +33,8 @@
 
         bool isCryptoSchemeSupported(const uint8_t uuid[16]);
         bool isContentTypeSupported(const String8 &mimeType);
-        status_t createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin);
+        status_t createDrmPlugin(const uint8_t uuid[16],
+                                 DrmPlugin **plugin);
     };
 
     class MockCryptoFactory : public CryptoFactory {
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 03fbdfd..74a2dce 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -141,7 +141,7 @@
     virtual void                     binderDied(const wp<IBinder>& who);
 
     // helper function to obtain camera service handle
-    static const sp<::android::hardware::ICameraService>& getCameraService();
+    static const sp<::android::hardware::ICameraService> getCameraService();
 
     sp<TCamUser>                     mCamera;
     status_t                         mStatus;
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index cb04c0e..8e641c7 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -38,12 +38,14 @@
         SURFACE_TYPE_SURFACE_VIEW = 0,
         SURFACE_TYPE_SURFACE_TEXTURE = 1
     };
-    sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+    const std::vector<sp<IGraphicBufferProducer>>& getGraphicBufferProducers() const;
     int                        getRotation() const;
     int                        getSurfaceSetID() const;
     int                        getSurfaceType() const;
     int                        getWidth() const;
     int                        getHeight() const;
+    bool                       isDeferred() const;
+    bool                       isShared() const;
     /**
      * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
      */
@@ -65,19 +67,20 @@
             int surfaceSetID = INVALID_SET_ID);
 
     bool operator == (const OutputConfiguration& other) const {
-        return (mGbp == other.mGbp &&
-                mRotation == other.mRotation &&
+        return ( mRotation == other.mRotation &&
                 mSurfaceSetID == other.mSurfaceSetID &&
                 mSurfaceType == other.mSurfaceType &&
                 mWidth == other.mWidth &&
-                mHeight == other.mHeight);
+                mHeight == other.mHeight &&
+                mIsDeferred == other.mIsDeferred &&
+                mIsShared == other.mIsShared &&
+                gbpsEqual(other));
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
     }
     bool operator < (const OutputConfiguration& other) const {
         if (*this == other) return false;
-        if (mGbp != other.mGbp) return mGbp < other.mGbp;
         if (mSurfaceSetID != other.mSurfaceSetID) {
             return mSurfaceSetID < other.mSurfaceSetID;
         }
@@ -90,20 +93,32 @@
         if (mHeight != other.mHeight) {
             return mHeight < other.mHeight;
         }
-
-        return mRotation < other.mRotation;
+        if (mRotation != other.mRotation) {
+            return mRotation < other.mRotation;
+        }
+        if (mIsDeferred != other.mIsDeferred) {
+            return mIsDeferred < other.mIsDeferred;
+        }
+        if (mIsShared != other.mIsShared) {
+            return mIsShared < other.mIsShared;
+        }
+        return gbpsLessThan(other);
     }
     bool operator > (const OutputConfiguration& other) const {
         return (*this != other && !(*this < other));
     }
 
+    bool gbpsEqual(const OutputConfiguration& other) const;
+    bool gbpsLessThan(const OutputConfiguration& other) const;
 private:
-    sp<IGraphicBufferProducer> mGbp;
+    std::vector<sp<IGraphicBufferProducer>> mGbps;
     int                        mRotation;
     int                        mSurfaceSetID;
     int                        mSurfaceType;
     int                        mWidth;
     int                        mHeight;
+    bool                       mIsDeferred;
+    bool                       mIsShared;
     // helper function
     static String16 readMaybeEmptyString16(const android::Parcel* parcel);
 };
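
The gbpsEqual() and gbpsLessThan() helpers replace the old single-producer comparisons; their definitions live in OutputConfiguration.cpp and are not shown in this hunk. A plausible element-wise sketch (illustrative only, not necessarily the committed implementation):

    bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
        return mGbps == other.mGbps;  // sp<> compares the underlying pointers
    }

    bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
        if (mGbps.size() != other.mGbps.size()) {
            return mGbps.size() < other.mGbps.size();
        }
        for (size_t i = 0; i < mGbps.size(); i++) {
            if (mGbps[i] != other.mGbps[i]) {
                return mGbps[i] < other.mGbps[i];
            }
        }
        return false;  // all producers equal
    }
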
diff --git a/include/camera/ndk/NdkCameraMetadataTags.h b/include/camera/ndk/NdkCameraMetadataTags.h
index 0fec983..ced6034 100644
--- a/include/camera/ndk/NdkCameraMetadataTags.h
+++ b/include/camera/ndk/NdkCameraMetadataTags.h
@@ -1515,6 +1515,40 @@
      */
     ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST =                // int32
             ACAMERA_CONTROL_START + 40,
+    /**
+     * <p>Allow camera device to enable zero-shutter-lag mode for requests with
+     * ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>If enableZsl is <code>true</code>, the camera device may enable zero-shutter-lag mode for requests with
+     * STILL_CAPTURE capture intent. The camera device may use images captured in the past to
+     * produce output images for a zero-shutter-lag request. The result metadata including the
+     * ACAMERA_SENSOR_TIMESTAMP reflects the source frames used to produce output images.
+     * Therefore, the contents of the output images and the result metadata may be out of order
+     * compared to previous regular requests. enableZsl does not affect requests with other
+     * capture intents.</p>
+     * <p>For example, when requests are submitted in the following order:
+     *   Request A: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is PREVIEW
+     *   Request B: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is STILL_CAPTURE</p>
+     * <p>The output images for request B may have contents captured before the output images for
+     * request A, and the result metadata for request B may be older than the result metadata for
+     * request A.</p>
+     * <p>Note that when enableZsl is <code>true</code>, it is not guaranteed to get output images captured in the
+     * past for requests with STILL_CAPTURE capture intent.</p>
+     * <p>The value of enableZsl in capture templates is always <code>false</code> if present.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     * @see ACAMERA_SENSOR_TIMESTAMP
+     */
+    ACAMERA_CONTROL_ENABLE_ZSL =                                // byte (enum)
+            ACAMERA_CONTROL_START + 41,
     ACAMERA_CONTROL_END,
 
     /**
@@ -5762,6 +5796,26 @@
 
 } acamera_metadata_enum_android_control_awb_lock_available_t;
 
+// ACAMERA_CONTROL_ENABLE_ZSL
+typedef enum acamera_metadata_enum_acamera_control_enable_zsl {
+    /**
+     * <p>Requests with ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE must be captured
+     * after previous requests.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_ENABLE_ZSL_FALSE                                 = 0,
+
+    /**
+     * <p>Requests with ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE may or may not be
+     * captured before previous requests.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_ENABLE_ZSL_TRUE                                  = 1,
+
+} acamera_metadata_enum_android_control_enable_zsl_t;
+
 
 
 // ACAMERA_EDGE_MODE
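
A minimal NDK-side sketch of the new control, assuming an already-built STILL_CAPTURE ACaptureRequest (the helper name is illustrative):

    #include <camera/NdkCameraMetadataTags.h>
    #include <camera/NdkCaptureRequest.h>

    // Ask the device to consider zero-shutter-lag for this still capture.
    static camera_status_t requestZsl(ACaptureRequest* request) {
        const uint8_t enableZsl = ACAMERA_CONTROL_ENABLE_ZSL_TRUE;
        return ACaptureRequest_setEntry_u8(
                request, ACAMERA_CONTROL_ENABLE_ZSL, 1 /* count */, &enableZsl);
    }
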
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index 68b3f23..9d026f6 100644
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -21,6 +21,7 @@
 #include <sys/types.h>
 
 #include <media/AudioBufferProvider.h>
+#include <media/AudioResamplerPublic.h>
 #include <system/audio.h>
 #include <system/audio_effect.h>
 #include <utils/StrongPointer.h>
@@ -112,6 +113,8 @@
 protected:
     sp<EffectsFactoryHalInterface> mEffectsFactory;
     sp<EffectHalInterface> mDownmixInterface;
+    size_t mInFrameSize;
+    size_t mOutFrameSize;
     sp<EffectBufferHalInterface> mInBuffer;
     sp<EffectBufferHalInterface> mOutBuffer;
     effect_config_t    mDownmixConfig;
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
index 7dd9d40..e812d2a 100644
--- a/include/media/BufferingSettings.h
+++ b/include/media/BufferingSettings.h
@@ -40,6 +40,8 @@
     static const int kNoWatermark = -1;
 
     static bool IsValidBufferingMode(int mode);
+    static bool IsTimeBasedBufferingMode(int mode);
+    static bool IsSizeBasedBufferingMode(int mode);
 
     BufferingMode mInitialBufferingMode;  // for prepare
     BufferingMode mRebufferingMode;  // for playback
@@ -64,6 +66,7 @@
     status_t writeToParcel(Parcel* parcel) const override;
     status_t readFromParcel(const Parcel* parcel) override;
 
+    String8 toString() const;
 };
 
 } // namespace android
diff --git a/include/media/Crypto.h b/include/media/Crypto.h
index 7d181d3..ce08f98 100644
--- a/include/media/Crypto.h
+++ b/include/media/Crypto.h
@@ -49,16 +49,11 @@
 
     virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
 
-    virtual ssize_t decrypt(
-            DestinationType dstType,
-            const uint8_t key[16],
-            const uint8_t iv[16],
-            CryptoPlugin::Mode mode,
-            const CryptoPlugin::Pattern &pattern,
-            const sp<IMemory> &sharedBuffer, size_t offset,
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+            CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+            const sp<IMemory> &source, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-            void *dstPtr,
-            AString *errorDetailMsg);
+            const DestinationBuffer &destination, AString *errorDetailMsg);
 
 private:
     mutable Mutex mLock;
diff --git a/include/media/CryptoHal.h b/include/media/CryptoHal.h
new file mode 100644
index 0000000..1ace957
--- /dev/null
+++ b/include/media/CryptoHal.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CRYPTO_HAL_H_
+
+#define CRYPTO_HAL_H_
+
+#include <android/hardware/drm/1.0/ICryptoFactory.h>
+#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+#include <media/ICrypto.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+
+#include "SharedLibrary.h"
+
+namespace android {
+
+struct CryptoHal : public BnCrypto {
+    CryptoHal();
+    virtual ~CryptoHal();
+
+    virtual status_t initCheck() const;
+
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+
+    virtual status_t createPlugin(
+            const uint8_t uuid[16], const void *data, size_t size);
+
+    virtual status_t destroyPlugin();
+
+    virtual bool requiresSecureDecoderComponent(
+            const char *mime) const;
+
+    virtual void notifyResolution(uint32_t width, uint32_t height);
+
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+            CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+            const sp<IMemory> &source, size_t offset,
+            const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+            const ICrypto::DestinationBuffer &destination,
+            AString *errorDetailMsg);
+
+private:
+    mutable Mutex mLock;
+
+    sp<SharedLibrary> mLibrary;
+    sp<::android::hardware::drm::V1_0::ICryptoFactory> mFactory;
+    sp<::android::hardware::drm::V1_0::ICryptoPlugin> mPlugin;
+
+    /**
+     * mInitCheck is:
+     *   NO_INIT if a plugin hasn't been created yet
+     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+     *   OK after a plugin has been created and mPlugin is valid
+     */
+    status_t mInitCheck;
+
+    void *mHeapBase;
+
+    sp<::android::hardware::drm::V1_0::ICryptoFactory>
+            makeCryptoFactory();
+    sp<::android::hardware::drm::V1_0::ICryptoPlugin>
+            makeCryptoPlugin(const uint8_t uuid[16], const void *initData,
+                size_t size);
+
+    status_t setHeapBase(const sp<IMemory> &sharedBuffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(CryptoHal);
+};
+
+}  // namespace android
+
+#endif  // CRYPTO_HAL_H_
diff --git a/include/media/Drm.h b/include/media/Drm.h
index d40019b..fc869cc 100644
--- a/include/media/Drm.h
+++ b/include/media/Drm.h
@@ -40,7 +40,7 @@
 
     virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
 
-    virtual status_t createPlugin(const uint8_t uuid[16]);
+    virtual status_t createPlugin(const uint8_t uuid[16], const String8 &appPackageName);
 
     virtual status_t destroyPlugin();
 
diff --git a/include/media/DrmHal.h b/include/media/DrmHal.h
new file mode 100644
index 0000000..82d2555
--- /dev/null
+++ b/include/media/DrmHal.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_HAL_H_
+
+#define DRM_HAL_H_
+
+#include <android/hardware/drm/1.0/IDrmPlugin.h>
+#include <android/hardware/drm/1.0/IDrmPluginListener.h>
+#include <android/hardware/drm/1.0/IDrmFactory.h>
+
+#include <media/IDrm.h>
+#include <media/IDrmClient.h>
+#include <utils/threads.h>
+
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::IDrmFactory;
+using ::android::hardware::drm::V1_0::IDrmPlugin;
+using ::android::hardware::drm::V1_0::IDrmPluginListener;
+using ::android::hardware::drm::V1_0::KeyStatus;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+namespace android {
+
+struct DrmSessionClientInterface;
+
+struct DrmHal : public BnDrm,
+             public IBinder::DeathRecipient,
+             public IDrmPluginListener {
+    DrmHal();
+    virtual ~DrmHal();
+
+    virtual status_t initCheck() const;
+
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+
+    virtual status_t createPlugin(const uint8_t uuid[16],
+                                  const String8 &appPackageName);
+
+    virtual status_t destroyPlugin();
+
+    virtual status_t openSession(Vector<uint8_t> &sessionId);
+
+    virtual status_t closeSession(Vector<uint8_t> const &sessionId);
+
+    virtual status_t
+        getKeyRequest(Vector<uint8_t> const &sessionId,
+                      Vector<uint8_t> const &initData,
+                      String8 const &mimeType, DrmPlugin::KeyType keyType,
+                      KeyedVector<String8, String8> const &optionalParameters,
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType);
+
+    virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
+                                        Vector<uint8_t> const &response,
+                                        Vector<uint8_t> &keySetId);
+
+    virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
+
+    virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
+                                 Vector<uint8_t> const &keySetId);
+
+    virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
+                                    KeyedVector<String8, String8> &infoMap) const;
+
+    virtual status_t getProvisionRequest(String8 const &certType,
+                                         String8 const &certAuthority,
+                                         Vector<uint8_t> &request,
+                                         String8 &defaultUrl);
+
+    virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
+                                              Vector<uint8_t> &certificate,
+                                              Vector<uint8_t> &wrappedKey);
+
+    virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
+    virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+
+    virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+    virtual status_t releaseAllSecureStops();
+
+    virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
+    virtual status_t getPropertyByteArray(String8 const &name,
+                                          Vector<uint8_t> &value ) const;
+    virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
+    virtual status_t setPropertyByteArray(String8 const &name,
+                                          Vector<uint8_t> const &value ) const;
+
+    virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+                                        String8 const &algorithm);
+
+    virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
+                                     String8 const &algorithm);
+
+    virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+                             Vector<uint8_t> const &keyId,
+                             Vector<uint8_t> const &input,
+                             Vector<uint8_t> const &iv,
+                             Vector<uint8_t> &output);
+
+    virtual status_t decrypt(Vector<uint8_t> const &sessionId,
+                             Vector<uint8_t> const &keyId,
+                             Vector<uint8_t> const &input,
+                             Vector<uint8_t> const &iv,
+                             Vector<uint8_t> &output);
+
+    virtual status_t sign(Vector<uint8_t> const &sessionId,
+                          Vector<uint8_t> const &keyId,
+                          Vector<uint8_t> const &message,
+                          Vector<uint8_t> &signature);
+
+    virtual status_t verify(Vector<uint8_t> const &sessionId,
+                            Vector<uint8_t> const &keyId,
+                            Vector<uint8_t> const &message,
+                            Vector<uint8_t> const &signature,
+                            bool &match);
+
+    virtual status_t signRSA(Vector<uint8_t> const &sessionId,
+                             String8 const &algorithm,
+                             Vector<uint8_t> const &message,
+                             Vector<uint8_t> const &wrappedKey,
+                             Vector<uint8_t> &signature);
+
+    virtual status_t setListener(const sp<IDrmClient>& listener);
+
+    // Methods of IDrmPluginListener
+    Return<void> sendEvent(EventType eventType,
+            const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data);
+
+    Return<void> sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+            int64_t expiryTimeInMS);
+
+    Return<void> sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+            const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey);
+
+    virtual void binderDied(const wp<IBinder> &the_late_who);
+
+private:
+    static Mutex mLock;
+
+    sp<DrmSessionClientInterface> mDrmSessionClient;
+
+    sp<IDrmClient> mListener;
+    mutable Mutex mEventLock;
+    mutable Mutex mNotifyLock;
+
+    sp<IDrmFactory> mFactory;
+    sp<IDrmPlugin> mPlugin;
+
+    /**
+     * mInitCheck is:
+     *   NO_INIT if a plugin hasn't been created yet
+     *   ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+     *   OK after a plugin has been created and mPlugin is valid
+     */
+    status_t mInitCheck;
+
+    sp<IDrmFactory> makeDrmFactory();
+    sp<IDrmPlugin> makeDrmPlugin(const uint8_t uuid[16],
+                                 const String8 &appPackageName);
+
+    void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
+};
+
+}  // namespace android
+
+#endif  // DRM_HAL_H_
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index a4bfaf8..8990f4b 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -15,8 +15,9 @@
  */
 
 #include <binder/IInterface.h>
-#include <media/stagefright/foundation/ABase.h>
+#include <cutils/native_handle.h>
 #include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ABase.h>
 
 #ifndef ANDROID_ICRYPTO_H_
 
@@ -47,21 +48,21 @@
     virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
 
     enum DestinationType {
-        kDestinationTypeVmPointer,    // non-secure
-        kDestinationTypeOpaqueHandle, // secure
+        kDestinationTypeSharedMemory, // non-secure
         kDestinationTypeNativeHandle  // secure
     };
 
-    virtual ssize_t decrypt(
-            DestinationType dstType,
-            const uint8_t key[16],
-            const uint8_t iv[16],
-            CryptoPlugin::Mode mode,
-            const CryptoPlugin::Pattern &pattern,
-            const sp<IMemory> &sharedBuffer, size_t offset,
+    struct DestinationBuffer {
+        DestinationType mType;
+        native_handle_t *mHandle;
+        sp<IMemory> mSharedMemory;
+    };
+
+    virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+            CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+            const sp<IMemory> &source, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
-            void *dstPtr,
-            AString *errorDetailMsg) = 0;
+            const DestinationBuffer &destination, AString *errorDetailMsg) = 0;
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(ICrypto);
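
With the DestinationBuffer refactor the caller no longer passes a raw dstPtr plus a separate DestinationType argument; it fills in one struct and hands it to decrypt(). A caller-side sketch for the non-secure path (crypto, srcMemory, dstMemory and the key/iv/pattern/subsample values are assumed to be set up elsewhere):

    ICrypto::DestinationBuffer destination;
    destination.mType = ICrypto::kDestinationTypeSharedMemory;
    destination.mHandle = NULL;             // only used for kDestinationTypeNativeHandle
    destination.mSharedMemory = dstMemory;  // sp<IMemory> owned by the caller

    AString errorDetailMsg;
    ssize_t result = crypto->decrypt(key, iv, mode, pattern,
            srcMemory, offset, subSamples, numSubSamples,
            destination, &errorDetailMsg);
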
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index fd51fd0..a57e372 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -34,7 +34,8 @@
 
     virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
 
-    virtual status_t createPlugin(const uint8_t uuid[16]) = 0;
+    virtual status_t createPlugin(const uint8_t uuid[16],
+                                  const String8 &appPackageName) = 0;
 
     virtual status_t destroyPlugin() = 0;
 
diff --git a/include/media/IMediaAnalyticsService.h b/include/media/IMediaAnalyticsService.h
index 9213637..f635e94 100644
--- a/include/media/IMediaAnalyticsService.h
+++ b/include/media/IMediaAnalyticsService.h
@@ -52,14 +52,6 @@
     // caller continues to own the passed item
     virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew) = 0;
 
-
-    // return lists of records that match the supplied parameters.
-    // finished [or not] records since time 'ts' with key 'key'
-    // timestamp 'ts' is nanoseconds, unix time.
-    // caller responsible for deallocating returned data structures
-    virtual List<MediaAnalyticsItem *> *getMediaAnalyticsItemList(bool finished, int64_t ts) = 0;
-    virtual List<MediaAnalyticsItem *> *getMediaAnalyticsItemList(bool finished, int64_t ts, MediaAnalyticsItem::Key key) = 0;
-
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index f642373..ca865a8 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -24,6 +24,7 @@
 #include <system/audio.h>
 
 #include <media/IMediaSource.h>
+#include <media/drm/DrmAPI.h>   // for DrmPlugin::* enum
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
 // global, and not in android::
@@ -89,6 +90,22 @@
     virtual status_t        setRetransmitEndpoint(const struct sockaddr_in* endpoint) = 0;
     virtual status_t        getRetransmitEndpoint(struct sockaddr_in* endpoint) = 0;
     virtual status_t        setNextPlayer(const sp<IMediaPlayer>& next) = 0;
+    // ModDrm
+    virtual status_t        prepareDrm(const uint8_t uuid[16], const int mode) = 0;
+    virtual status_t        releaseDrm() = 0;
+    virtual status_t        getKeyRequest(Vector<uint8_t> const& scope,
+                                    String8 const &mimeType,
+                                    DrmPlugin::KeyType keyType,
+                                    KeyedVector<String8, String8>& optionalParameters,
+                                    Vector<uint8_t>& request,
+                                    String8& defaultUrl,
+                                    DrmPlugin::KeyRequestType& keyRequestType) = 0;
+    virtual status_t        provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
+                                    Vector<uint8_t>& response,
+                                    Vector<uint8_t>& keySetId) = 0;
+    virtual status_t        restoreKeys(Vector<uint8_t> const& keySetId) = 0;
+    virtual status_t        getDrmPropertyString(String8 const& name, String8& value) = 0;
+    virtual status_t        setDrmPropertyString(String8 const& name, String8 const& value) = 0;
 
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 3e05532..d5aec3f 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -44,7 +44,8 @@
     virtual status_t setOutputFormat(int of) = 0;
     virtual status_t setVideoEncoder(int ve) = 0;
     virtual status_t setAudioEncoder(int ae) = 0;
-    virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
+    virtual status_t setOutputFile(int fd) = 0;
+    virtual status_t setNextOutputFile(int fd) = 0;
     virtual status_t setVideoSize(int width, int height) = 0;
     virtual status_t setVideoFrameRate(int frames_per_second) = 0;
     virtual status_t setParameters(const String8& params) = 0;
diff --git a/include/media/MediaAnalyticsItem.h b/include/media/MediaAnalyticsItem.h
index a78aa8b..f050e7f 100644
--- a/include/media/MediaAnalyticsItem.h
+++ b/include/media/MediaAnalyticsItem.h
@@ -40,6 +40,7 @@
 
     friend class MediaAnalyticsService;
     friend class IMediaAnalyticsService;
+    friend class MediaMetricsJNI;
 
     public:
 
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 0e815cb..4e18f1f 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -280,6 +280,34 @@
         return INVALID_OPERATION;
     }
 
+    // ModDrm
+    virtual status_t prepareDrm(const uint8_t uuid[16], const int mode) {
+        return INVALID_OPERATION;
+    }
+    virtual status_t releaseDrm() {
+        return INVALID_OPERATION;
+    }
+    virtual status_t getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
+                             DrmPlugin::KeyType keyType,
+                             KeyedVector<String8, String8>& optionalParameters,
+                             Vector<uint8_t>& request, String8& defaultUrl,
+                             DrmPlugin::KeyRequestType& keyRequestType) {
+        return INVALID_OPERATION;
+    }
+    virtual status_t provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
+                             Vector<uint8_t>& response, Vector<uint8_t>& keySetId) {
+        return INVALID_OPERATION;
+    }
+    virtual status_t restoreKeys(Vector<uint8_t> const& keySetId) {
+        return INVALID_OPERATION;
+    }
+    virtual status_t getDrmPropertyString(String8 const& name, String8& value) {
+        return INVALID_OPERATION;
+    }
+    virtual status_t setDrmPropertyString(String8 const& name, String8 const& value) {
+        return INVALID_OPERATION;
+    }
+
 private:
     friend class MediaPlayerService;
 
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index b9105b1..c273da7 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -44,7 +44,8 @@
     virtual status_t setCamera(const sp<hardware::ICamera>& camera,
                                const sp<ICameraRecordingProxy>& proxy) = 0;
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
-    virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
+    virtual status_t setOutputFile(int fd) = 0;
+    virtual status_t setNextOutputFile(int fd) {return INVALID_OPERATION;}
     virtual status_t setOutputFileAuxiliary(int /*fd*/) {return INVALID_OPERATION;}
     virtual status_t setParameters(const String8& params) = 0;
     virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
index aeb1765..6f79182 100644
--- a/include/media/OMXBuffer.h
+++ b/include/media/OMXBuffer.h
@@ -21,6 +21,7 @@
 #include <media/IOMX.h>
 #include <system/window.h>
 #include <utils/StrongPointer.h>
+#include <hidl/HidlSupport.h>
 
 namespace android {
 
@@ -35,13 +36,21 @@
             ::android::OMXBuffer const& l);
     inline bool convertTo(::android::OMXBuffer* l,
             ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
-}}}}}
+}
+namespace utils {
+    inline bool wrapAs(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+            ::android::OMXBuffer const& l);
+    inline bool convertTo(::android::OMXBuffer* l,
+            ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+}
+}}}}
 
 class GraphicBuffer;
 class IMemory;
 class MediaCodecBuffer;
 class NativeHandle;
 struct OMXNodeInstance;
+using hardware::hidl_memory;
 
 // TODO: After complete HIDL transition, this class would be replaced by
 // CodecBuffer.
@@ -54,13 +63,14 @@
     // Default constructor, constructs a buffer of type kBufferTypeInvalid.
     OMXBuffer();
 
-    // Constructs a buffer of type kBufferTypePreset with mRangeLength set to
-    // |codecBuffer|'s size (or 0 if |codecBuffer| is NULL).
+    // Constructs a buffer of type kBufferTypePreset with mRangeOffset set to
+    // |codecBuffer|'s offset and mRangeLength set to |codecBuffer|'s size (or 0
+    // if |codecBuffer| is NULL).
     OMXBuffer(const sp<MediaCodecBuffer> &codecBuffer);
 
-    // Constructs a buffer of type kBufferTypePreset with a specified
-    // mRangeLength.
-    explicit OMXBuffer(OMX_U32 rangeLength);
+    // Constructs a buffer of type kBufferTypePreset with specified mRangeOffset
+    // and mRangeLength.
+    OMXBuffer(OMX_U32 rangeOffset, OMX_U32 rangeLength);
 
     // Constructs a buffer of type kBufferTypeSharedMem.
     OMXBuffer(const sp<IMemory> &mem);
@@ -71,6 +81,9 @@
     // Constructs a buffer of type kBufferTypeNativeHandle.
     OMXBuffer(const sp<NativeHandle> &handle);
 
+    // Constructs a buffer of type kBufferTypeHidlMemory.
+    OMXBuffer(const hidl_memory &hidlMemory);
+
     // Parcelling/Un-parcelling.
     status_t writeToParcel(Parcel *parcel) const;
     status_t readFromParcel(const Parcel *parcel);
@@ -87,13 +100,20 @@
     friend inline bool (::android::hardware::media::omx::V1_0::implementation::
             convertTo)(OMXBuffer* l,
             ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+    friend inline bool (::android::hardware::media::omx::V1_0::utils::
+            wrapAs)(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+            OMXBuffer const& l);
+    friend inline bool (::android::hardware::media::omx::V1_0::utils::
+            convertTo)(OMXBuffer* l,
+            ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
 
     enum BufferType {
         kBufferTypeInvalid = 0,
         kBufferTypePreset,
         kBufferTypeSharedMem,
-        kBufferTypeANWBuffer,
+        kBufferTypeANWBuffer, // Use only for non-Treble
         kBufferTypeNativeHandle,
+        kBufferTypeHidlMemory // Mapped to CodecBuffer::Type::SHARED_MEM.
     };
 
     BufferType mBufferType;
@@ -101,6 +121,7 @@
     // kBufferTypePreset
     // If the port is operating in byte buffer mode, mRangeLength is the valid
     // range length. Otherwise the range info should also be ignored.
+    OMX_U32 mRangeOffset;
     OMX_U32 mRangeLength;
 
     // kBufferTypeSharedMem
@@ -112,6 +133,9 @@
     // kBufferTypeNativeHandle
     sp<NativeHandle> mNativeHandle;
 
+    // kBufferTypeHidlMemory
+    hidl_memory mHidlMemory;
+
     // Move assignment
     OMXBuffer &operator=(OMXBuffer&&);
 
diff --git a/include/media/OMXFenceParcelable.h b/include/media/OMXFenceParcelable.h
index f529301..2a8da87 100644
--- a/include/media/OMXFenceParcelable.h
+++ b/include/media/OMXFenceParcelable.h
@@ -26,12 +26,20 @@
 // This is needed temporarily for the OMX HIDL transition.
 namespace hardware {
     struct hidl_handle;
-namespace media { namespace omx { namespace V1_0 { namespace implementation {
+namespace media { namespace omx { namespace V1_0 {
+namespace implementation {
     void wrapAs(::android::OMXFenceParcelable* l,
             ::android::hardware::hidl_handle const& t);
     bool convertTo(::android::OMXFenceParcelable* l,
             ::android::hardware::hidl_handle const& t);
-}}}}}
+}
+namespace utils {
+    void wrapAs(::android::OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+    bool convertTo(::android::OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+}
+}}}}
 
 struct OMXFenceParcelable : public Parcelable {
     OMXFenceParcelable() : mFenceFd(-1) {}
@@ -56,6 +64,12 @@
     friend bool (::android::hardware::media::omx::V1_0::implementation::
             convertTo)(OMXFenceParcelable* l,
             ::android::hardware::hidl_handle const& t);
+    friend void (::android::hardware::media::omx::V1_0::utils::
+            wrapAs)(OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
+    friend bool (::android::hardware::media::omx::V1_0::utils::
+            convertTo)(OMXFenceParcelable* l,
+            ::android::hardware::hidl_handle const& t);
 };
 
 inline status_t OMXFenceParcelable::readFromParcel(const Parcel* parcel) {
diff --git a/include/media/PluginLoader.h b/include/media/PluginLoader.h
index 7d54ce4..a626e16 100644
--- a/include/media/PluginLoader.h
+++ b/include/media/PluginLoader.h
@@ -43,7 +43,7 @@
             while ((pEntry = readdir(pDir))) {
                 String8 file(pEntry->d_name);
                 if (file.getPathExtension() == ".so") {
-                    String8 path = pluginDir + pEntry->d_name;
+                    String8 path = pluginDir + "/" + pEntry->d_name;
                     T *plugin = loadOne(path, entry);
                     if (plugin) {
                         factories.push(plugin);
@@ -60,7 +60,10 @@
         }
     }
 
-    T *getFactory(size_t i) const {return factories[i];}
+    T *getFactory(size_t i) const {
+        return factories[i];
+    }
+
     size_t factoryCount() const {return factories.size();}
 
   private:
@@ -74,11 +77,11 @@
             CreateFactoryFunc createFactoryFunc =
                     (CreateFactoryFunc)library->lookup(entry);
             if (createFactoryFunc) {
+                ALOGV("Found plugin factory entry %s in %s", entry, path);
                 libraries.push(library);
-                return createFactoryFunc();
-            } else {
-                ALOGE("Failed to create plugin factory from %s", path);
-            }
+                T* result = createFactoryFunc();
+                return result;
+            }
         }
         return NULL;
     }
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
new file mode 100644
index 0000000..2abc45e
--- /dev/null
+++ b/include/media/RecordBufferConverter.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_RECORD_BUFFER_CONVERTER_H
+#define ANDROID_RECORD_BUFFER_CONVERTER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+
+class AudioResampler;
+class PassthruBufferProvider;
+
+namespace android {
+
+/* The RecordBufferConverter is used for format, channel, and sample rate
+ * conversion for a RecordTrack.
+ *
+ * RecordBufferConverter uses the convert() method rather than exposing a
+ * buffer provider interface; this is to save a memory copy.
+ *
+ * There are legacy conversion requirements for this converter, specifically
+ * due to mono handling, so be careful about modifying.
+ *
+ * Original source audioflinger/Threads.{h,cpp}
+ */
+class RecordBufferConverter
+{
+public:
+    RecordBufferConverter(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    ~RecordBufferConverter();
+
+    /* Converts input data from an AudioBufferProvider by format, channelMask,
+     * and sampleRate to a destination buffer.
+     *
+     * Parameters
+     *      dst:  buffer to place the converted data.
+     * provider:  buffer provider to obtain source data.
+     *   frames:  number of frames to convert
+     *
+     * Returns the number of frames converted.
+     */
+    size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
+
+    // returns NO_ERROR if constructor was successful
+    status_t initCheck() const {
+        // mSrcChannelMask set on successful updateParameters
+        return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
+    }
+
+    // allows dynamic reconfigure of all parameters
+    status_t updateParameters(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    // called to reset resampler buffers on record track discontinuity
+    void reset();
+
+private:
+    // format conversion when not using resampler
+    void convertNoResampler(void *dst, const void *src, size_t frames);
+
+    // format conversion when using resampler; modifies src in-place
+    void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+
+    // user provided information
+    audio_channel_mask_t mSrcChannelMask;
+    audio_format_t       mSrcFormat;
+    uint32_t             mSrcSampleRate;
+    audio_channel_mask_t mDstChannelMask;
+    audio_format_t       mDstFormat;
+    uint32_t             mDstSampleRate;
+
+    // derived information
+    uint32_t             mSrcChannelCount;
+    uint32_t             mDstChannelCount;
+    size_t               mDstFrameSize;
+
+    // format conversion buffer
+    void                *mBuf;
+    size_t               mBufFrames;
+    size_t               mBufFrameSize;
+
+    // resampler info
+    AudioResampler      *mResampler;
+
+    bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
+    bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
+    bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
+    PassthruBufferProvider *mInputConverterProvider;    // converts input to float
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_RECORD_BUFFER_CONVERTER_H
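
A minimal usage sketch of the class above (channel masks, formats and rates are illustrative; provider, dst and frames come from the surrounding record path):

    RecordBufferConverter converter(
            AUDIO_CHANNEL_IN_STEREO, AUDIO_FORMAT_PCM_16_BIT, 48000,   // source
            AUDIO_CHANNEL_IN_MONO,   AUDIO_FORMAT_PCM_16_BIT, 16000);  // destination

    if (converter.initCheck() == NO_ERROR) {
        // provider supplies the source frames; dst receives the converted data
        size_t framesOut = converter.convert(dst, provider, frames);
        (void)framesOut;
    }
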
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
index ffe4c1f..e262eef 100644
--- a/include/media/TypeConverter.h
+++ b/include/media/TypeConverter.h
@@ -80,6 +80,16 @@
     typedef audio_mode_t Type;
     typedef Vector<Type> Collection;
 };
+struct UsageTraits
+{
+    typedef audio_usage_t Type;
+    typedef Vector<Type> Collection;
+};
+struct SourceTraits
+{
+    typedef audio_source_t Type;
+    typedef Vector<Type> Collection;
+};
 template <typename T>
 struct DefaultTraits
 {
@@ -215,6 +225,8 @@
 typedef TypeConverter<GainModeTraits> GainModeConverter;
 typedef TypeConverter<StreamTraits> StreamTypeConverter;
 typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+typedef TypeConverter<UsageTraits> UsageTypeConverter;
+typedef TypeConverter<SourceTraits> SourceTypeConverter;
 
 bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
 
diff --git a/include/media/audiohal/EffectBufferHalInterface.h b/include/media/audiohal/EffectBufferHalInterface.h
index 102ec56..6fa7940 100644
--- a/include/media/audiohal/EffectBufferHalInterface.h
+++ b/include/media/audiohal/EffectBufferHalInterface.h
@@ -42,6 +42,8 @@
 
     virtual void update() = 0;  // copies data from the external buffer, noop for allocated buffers
     virtual void commit() = 0;  // copies data to the external buffer, noop for allocated buffers
+    virtual void update(size_t size) = 0;  // copies partial data from external buffer
+    virtual void commit(size_t size) = 0;  // copies partial data to external buffer
 
     static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
     static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
diff --git a/include/media/audiohal/hidl/HalDeathHandler.h b/include/media/audiohal/hidl/HalDeathHandler.h
new file mode 100644
index 0000000..c9b7084
--- /dev/null
+++ b/include/media/audiohal/hidl/HalDeathHandler.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+#define ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+
+#include <functional>
+#include <mutex>
+#include <unordered_map>
+
+#include <hidl/HidlSupport.h>
+#include <utils/Singleton.h>
+
+using android::hardware::hidl_death_recipient;
+using android::hidl::base::V1_0::IBase;
+
+namespace android {
+
+class HalDeathHandler : public hidl_death_recipient, private Singleton<HalDeathHandler> {
+  public:
+    typedef std::function<void()> AtExitHandler;
+
+    // Note that the exit handler gets called using a thread from
+    // RPC threadpool, thus it needs to be thread-safe.
+    void registerAtExitHandler(void* cookie, AtExitHandler handler);
+    void unregisterAtExitHandler(void* cookie);
+
+    // hidl_death_recipient
+    virtual void serviceDied(uint64_t cookie, const wp<IBase>& who);
+
+    // Used both for (un)registering handlers, and for passing to
+    // '(un)linkToDeath'.
+    static sp<HalDeathHandler> getInstance();
+
+  private:
+    friend class Singleton<HalDeathHandler>;
+    typedef std::unordered_map<void*, AtExitHandler> Handlers;
+
+    HalDeathHandler();
+    virtual ~HalDeathHandler();
+
+    sp<HalDeathHandler> mSelf;  // Allows the singleton instance to live forever.
+    std::mutex mHandlersLock;
+    Handlers mHandlers;
+};
+
+}  // namespace android
+
+#endif // ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
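
A minimal usage sketch, assuming `hal` is a HIDL proxy for an audio HAL interface and onHalDied() is a hypothetical cleanup routine; the at-exit handler runs on an RPC threadpool thread, so it must be thread-safe:

    sp<HalDeathHandler> deathHandler = HalDeathHandler::getInstance();

    // Register cleanup keyed by an arbitrary cookie (here: this object).
    deathHandler->registerAtExitHandler(this, [this]() { onHalDied(); });

    // The same singleton is handed to the HIDL linkToDeath() call.
    hal->linkToDeath(deathHandler, 0 /* cookie */);

    // On teardown:
    deathHandler->unregisterAtExitHandler(this);
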
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index be34d027..2c5ff1f 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -22,6 +22,7 @@
 #include <binder/IMemory.h>
 
 #include <media/AudioResamplerPublic.h>
+#include <media/BufferingSettings.h>
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaPlayer.h>
 #include <media/IMediaDeathNotifier.h>
@@ -54,6 +55,7 @@
     MEDIA_INFO              = 200,
     MEDIA_SUBTITLE_DATA     = 201,
     MEDIA_META_DATA         = 202,
+    MEDIA_DRM_INFO          = 210,
 };
 
 // Generic error codes for the media player framework.  Errors are fatal, the
@@ -219,6 +221,9 @@
             status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer);
             status_t        setListener(const sp<MediaPlayerListener>& listener);
+            status_t        getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+            status_t        getBufferingSettings(BufferingSettings* buffering /* nonnull */);
+            status_t        setBufferingSettings(const BufferingSettings& buffering);
             status_t        prepare();
             status_t        prepareAsync();
             status_t        start();
@@ -256,6 +261,19 @@
             status_t        getParameter(int key, Parcel* reply);
             status_t        setRetransmitEndpoint(const char* addrString, uint16_t port);
             status_t        setNextMediaPlayer(const sp<MediaPlayer>& player);
+            // ModDrm
+            status_t        prepareDrm(const uint8_t uuid[16], const int mode);
+            status_t        releaseDrm();
+            status_t        getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
+                                    DrmPlugin::KeyType keyType,
+                                    KeyedVector<String8, String8>& optionalParameters,
+                                    Vector<uint8_t>& request, String8& defaultUrl,
+                                    DrmPlugin::KeyRequestType& keyRequestType);
+            status_t        provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
+                                    Vector<uint8_t>& response, Vector<uint8_t>& keySetId);
+            status_t        restoreKeys(Vector<uint8_t> const& keySetId);
+            status_t        getDrmPropertyString(String8 const& name, String8& value);
+            status_t        setDrmPropertyString(String8 const& name, String8 const& value);
 
 private:
             void            clear_l();
@@ -292,6 +310,7 @@
     float                       mSendLevel;
     struct sockaddr_in          mRetransmitEndpoint;
     bool                        mRetransmitEndpointValid;
+    BufferingSettings           mCurrentBufferingSettings;
 };
 
 }; // namespace android
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index c3f39a2..c2916be 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -171,6 +171,8 @@
 
     MEDIA_RECORDER_INFO_MAX_DURATION_REACHED      = 800,
     MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED      = 801,
+    MEDIA_RECORDER_INFO_MAX_FILESIZE_APPROACHING  = 802,
+    MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED  = 803,
 
     // All track related informational events start here
     MEDIA_RECORDER_TRACK_INFO_LIST_START           = 1000,
@@ -227,7 +229,8 @@
     status_t    setOutputFormat(int of);
     status_t    setVideoEncoder(int ve);
     status_t    setAudioEncoder(int ae);
-    status_t    setOutputFile(int fd, int64_t offset, int64_t length);
+    status_t    setOutputFile(int fd);
+    status_t    setNextOutputFile(int fd);
     status_t    setVideoSize(int width, int height);
     status_t    setVideoFrameRate(int frames_per_second);
     status_t    setParameters(const String8& params);
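
A client-side sketch of the fd-only output API (recorder is an already-configured sp<MediaRecorder>; fd and nextFd are open, writable descriptors owned by the app; the max-filesize key is the existing setParameters() limit that drives the new approaching/rollover events):

    recorder->setOutputFile(fd);                                  // offset/length arguments removed
    recorder->setParameters(String8("max-filesize=100000000"));   // existing size limit
    recorder->setNextOutputFile(nextFd);                          // file to continue into when the
                                                                  // current one nears its limit
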
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 3420617..814a643 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -41,6 +41,14 @@
 struct DescribeColorFormat2Params;
 struct DataConverter;
 
+// Treble shared memory
+namespace hidl { namespace memory { namespace V1_0 {
+struct IAllocator;
+struct IMemory;
+}}};
+typedef hidl::memory::V1_0::IAllocator TAllocator;
+typedef hidl::memory::V1_0::IMemory TMemory;
+
 struct ACodec : public AHierarchicalStateMachine, public CodecBase {
     ACodec();
 
@@ -86,6 +94,12 @@
 
     static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
 
+    // Read the flag from "media.use_treble_omx", save it locally, and return
+    // it.
+    bool updateTrebleFlag();
+    // Return the saved flag.
+    bool getTrebleFlag() const;
+
 protected:
     virtual ~ACodec();
 
@@ -218,6 +232,8 @@
     sp<IOMX> mOMX;
     sp<IOMXNode> mOMXNode;
     int32_t mNodeGeneration;
+    bool mTrebleFlag;
+    sp<TAllocator> mAllocator[2];
     sp<MemoryDealer> mDealer[2];
 
     bool mUsingNativeWindow;
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index d415b8b..39e7d01 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -24,10 +24,12 @@
 #include <media/stagefright/MediaWriter.h>
 #include <utils/List.h>
 #include <utils/threads.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ALooper.h>
 
 namespace android {
 
-struct AMessage;
+class AMessage;
 class MediaBuffer;
 class MetaData;
 
@@ -65,16 +67,25 @@
     status_t setGeoData(int latitudex10000, int longitudex10000);
     status_t setCaptureRate(float captureFps);
     status_t setTemporalLayerCount(uint32_t layerCount);
+    void notifyApproachingLimit();
     virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
     virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+    virtual status_t setNextFd(int fd);
 
 protected:
     virtual ~MPEG4Writer();
 
 private:
     class Track;
+    friend struct AHandlerReflector<MPEG4Writer>;
+
+    enum {
+        kWhatSwitch                          = 'swch',
+    };
 
     int  mFd;
+    int mNextFd;
+    sp<MetaData> mStartMeta;
     status_t mInitCheck;
     bool mIsRealTimeRecording;
     bool mUse4ByteNalLength;
@@ -83,6 +94,7 @@
     bool mPaused;
     bool mStarted;  // Writer thread + track threads started successfully
     bool mWriterThreadStarted;  // Only writer thread started successfully
+    bool mSendNotify;
     off64_t mOffset;
     off_t mMdatOffset;
     uint8_t *mMoovBoxBuffer;
@@ -99,6 +111,10 @@
     int mLongitudex10000;
     bool mAreGeoTagsAvailable;
     int32_t mStartTimeOffsetMs;
+    bool mSwitchPending;
+
+    sp<ALooper> mLooper;
+    sp<AHandlerReflector<MPEG4Writer> > mReflector;
 
     Mutex mLock;
 
@@ -184,6 +200,8 @@
     void lock();
     void unlock();
 
+    void initInternal(int fd);
+
     // Acquire lock before calling these methods
     off64_t addSample_l(MediaBuffer *buffer);
     off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
@@ -192,6 +210,7 @@
     bool exceedsFileSizeLimit();
     bool use32BitFileOffset() const;
     bool exceedsFileDurationLimit();
+    bool approachingFileSizeLimit();
     bool isFileStreamable() const;
     void trackProgressStatus(size_t trackId, int64_t timeUs, status_t err = OK);
     void writeCompositionMatrix(int32_t degrees);
@@ -202,6 +221,7 @@
     void writeGeoDataBox();
     void writeLatitude(int degreex10000);
     void writeLongitude(int degreex10000);
+    void finishCurrentSession();
 
     void addDeviceMeta();
     void writeHdlr();
@@ -210,10 +230,13 @@
     void writeMetaBox();
     void sendSessionSummary();
     void release();
-    status_t reset();
+    status_t switchFd();
+    status_t reset(bool stopSource = true);
 
     static uint32_t getMpeg4Time();
 
+    void onMessageReceived(const sp<AMessage> &msg);
+
     MPEG4Writer(const MPEG4Writer &);
     MPEG4Writer &operator=(const MPEG4Writer &);
 };
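
A sketch (not from this patch) of how the new pieces fit together: when a write notices that the size limit is near, the writer can post kWhatSwitch to its looper, and the handler carries recording over to the descriptor supplied via setNextFd(); the actual logic lives in MPEG4Writer.cpp.

#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/foundation/ADebug.h>

void MPEG4Writer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatSwitch:
            // Close out the current file and continue writing to mNextFd.
            (void)switchFd();
            break;
        default:
            TRESPASS();
    }
}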
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 2e367bf..699ae48 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -25,6 +25,7 @@
 #include <media/hardware/CryptoAPI.h>
 #include <media/MediaCodecInfo.h>
 #include <media/MediaResource.h>
+#include <media/MediaAnalyticsItem.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/FrameRenderTracker.h>
 #include <utils/Vector.h>
@@ -172,6 +173,8 @@
 
     status_t getName(AString *componentName) const;
 
+    status_t getMetrics(Parcel *reply);
+
     status_t setParameters(const sp<AMessage> &params);
 
     // Create a MediaCodec notification message from a list of rendered or dropped render infos
@@ -298,6 +301,8 @@
     sp<Surface> mSurface;
     SoftwareRenderer *mSoftRenderer;
 
+    MediaAnalyticsItem *mAnalyticsItem;
+
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index fbb4a67..211f794 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -20,6 +20,7 @@
 
 #include <media/IMediaExtractor.h>
 #include <media/IMediaSource.h>
+#include <media/MediaAnalyticsItem.h>
 
 namespace android {
 
@@ -69,10 +70,11 @@
 
 protected:
     MediaExtractor();
-    virtual ~MediaExtractor() {}
+    virtual ~MediaExtractor();
+
+    MediaAnalyticsItem *mAnalyticsItem;
 
 private:
-    bool mIsDrm;
 
     typedef bool (*SnifferFunc)(
             const sp<DataSource> &source, String8 *mimeType,
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index fa855a8..63c3ca5 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -45,8 +45,9 @@
     // Please update media/java/android/media/MediaMuxer.java if the
     // OutputFormat is updated.
     enum OutputFormat {
-        OUTPUT_FORMAT_MPEG_4 = 0,
-        OUTPUT_FORMAT_WEBM   = 1,
+        OUTPUT_FORMAT_MPEG_4      = 0,
+        OUTPUT_FORMAT_WEBM        = 1,
+        OUTPUT_FORMAT_THREE_GPP   = 2,
         OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
     };
 
diff --git a/include/media/stagefright/MediaWriter.h b/include/media/stagefright/MediaWriter.h
index 2b19523..9c30ffa 100644
--- a/include/media/stagefright/MediaWriter.h
+++ b/include/media/stagefright/MediaWriter.h
@@ -50,6 +50,7 @@
 
     virtual void setStartTimeOffsetMs(int /*ms*/) {}
     virtual int32_t getStartTimeOffsetMs() const { return 0; }
+    virtual status_t setNextFd(int fd) { return INVALID_OPERATION; }
 
 protected:
     virtual ~MediaWriter() {}
diff --git a/include/media/stagefright/OMXClient.h b/include/media/stagefright/OMXClient.h
index 6973405..6b86cbf 100644
--- a/include/media/stagefright/OMXClient.h
+++ b/include/media/stagefright/OMXClient.h
@@ -27,6 +27,7 @@
     OMXClient();
 
     status_t connect();
+    status_t connectTreble();
     void disconnect();
 
     sp<IOMX> interface() {
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 8eff914..88a416a 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -23,6 +23,7 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
+#include <media/BufferingSettings.h>
 #include <media/MediaPlayerInterface.h>
 
 namespace android {
@@ -90,6 +91,9 @@
 void readFromAMessage(
         const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering);
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
+
 AString nameForFd(int fd);
 
 }  // namespace android
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index f9ab208..0de3559 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -383,6 +383,10 @@
         if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
             flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
         }
+        // check deep buffer after flags have been modified above
+        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
+            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        }
     }
 
     // these below should probably come from the audioFlinger too...
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
index deb26b5..5e00b77 100644
--- a/media/libaudiohal/Android.mk
+++ b/media/libaudiohal/Android.mk
@@ -9,12 +9,24 @@
     liblog      \
     libutils
 
-ifeq ($(ENABLE_TREBLE), true)
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
 
-LOCAL_CFLAGS += -DENABLE_TREBLE
+# Use audiohal directly w/o hwbinder middleware.
+# This is for performance comparison and debugging only.
+
+LOCAL_SRC_FILES := \
+    DeviceHalLocal.cpp          \
+    DevicesFactoryHalLocal.cpp  \
+    EffectBufferHalLocal.cpp    \
+    EffectHalLocal.cpp          \
+    EffectsFactoryHalLocal.cpp  \
+    StreamHalLocal.cpp
+
+else  # if !USE_LEGACY_LOCAL_AUDIO_HAL
 
 LOCAL_SRC_FILES := \
     ConversionHelperHidl.cpp   \
+    HalDeathHandlerHidl.cpp   \
     DeviceHalHidl.cpp          \
     DevicesFactoryHalHidl.cpp  \
     EffectBufferHalHidl.cpp    \
@@ -36,16 +48,7 @@
     android.hidl.memory@1.0                \
     libmedia_helper
 
-else  # if !ENABLE_TREBLE
-
-LOCAL_SRC_FILES := \
-    DeviceHalLocal.cpp          \
-    DevicesFactoryHalLocal.cpp  \
-    EffectBufferHalLocal.cpp    \
-    EffectHalLocal.cpp          \
-    EffectsFactoryHalLocal.cpp  \
-    StreamHalLocal.cpp
-endif  # ENABLE_TREBLE
+endif  # USE_LEGACY_LOCAL_AUDIO_HAL
 
 LOCAL_MODULE := libaudiohal
 
diff --git a/media/libaudiohal/ConversionHelperHidl.h b/media/libaudiohal/ConversionHelperHidl.h
index 00d5b2c..23fb360 100644
--- a/media/libaudiohal/ConversionHelperHidl.h
+++ b/media/libaudiohal/ConversionHelperHidl.h
@@ -52,7 +52,7 @@
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
-        return ret.isOk() ? OK : UNKNOWN_ERROR;
+        return ret.isOk() ? OK : FAILED_TRANSACTION;
     }
 
     status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
@@ -62,7 +62,7 @@
     template<typename T>
     status_t processReturn(
             const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
-        const status_t st = ret.isOk() ? analyzeResult(retval) : UNKNOWN_ERROR;
+        const status_t st = ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
diff --git a/media/libaudiohal/DeviceHalHidl.cpp b/media/libaudiohal/DeviceHalHidl.cpp
index a6ced12..dcedfd3 100644
--- a/media/libaudiohal/DeviceHalHidl.cpp
+++ b/media/libaudiohal/DeviceHalHidl.cpp
@@ -50,6 +50,10 @@
 status_t deviceAddressFromHal(
         audio_devices_t device, const char* halAddress, DeviceAddress* address) {
     address->device = AudioDevice(device);
+
+    if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+        return OK;
+    }
     const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
     if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
     if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
index 6444079..b0f28b9 100644
--- a/media/libaudiohal/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/DevicesFactoryHalHidl.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 
 #include "ConversionHelperHidl.h"
@@ -40,6 +41,11 @@
 
 DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
     mDevicesFactory = IDevicesFactory::getService("audio_devices_factory");
+    if (mDevicesFactory != 0) {
+        // It is assumed that the DevicesFactory is owned by AudioFlinger
+        // and thus has the same lifespan.
+        mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
 }
 
 DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
@@ -59,6 +65,9 @@
     } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
         *device = IDevicesFactory::Device::R_SUBMIX;
         return OK;
+    } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+        *device = IDevicesFactory::Device::STUB;
+        return OK;
     }
     ALOGE("Invalid device name %s", name);
     return BAD_VALUE;
@@ -83,7 +92,7 @@
         else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
         else return NO_INIT;
     }
-    return UNKNOWN_ERROR;
+    return FAILED_TRANSACTION;
 }
 
 } // namespace android
diff --git a/media/libaudiohal/EffectBufferHalHidl.cpp b/media/libaudiohal/EffectBufferHalHidl.cpp
index 446d2ef..82b4a75 100644
--- a/media/libaudiohal/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/EffectBufferHalHidl.cpp
@@ -80,7 +80,7 @@
                     retval = OK;
                 }
             });
-    if (retval == OK) {
+    if (result.isOk() && retval == OK) {
         mMemory = hardware::mapMemory(mHidlBuffer.data);
         if (mMemory != 0) {
             mMemory->update();
@@ -91,8 +91,10 @@
             ALOGE("Failed to map allocated ashmem");
             retval = NO_MEMORY;
         }
+    } else {
+        ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
     }
-    return retval;
+    return result.isOk() ? retval : FAILED_TRANSACTION;
 }
 
 audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
@@ -113,15 +115,25 @@
 }
 
 void EffectBufferHalHidl::update() {
-    if (mExternalData == nullptr) return;
-    mMemory->update();
-    memcpy(mAudioBuffer.raw, mExternalData, mBufferSize);
-    mMemory->commit();
+    update(mBufferSize);
 }
 
 void EffectBufferHalHidl::commit() {
+    commit(mBufferSize);
+}
+
+void EffectBufferHalHidl::update(size_t size) {
     if (mExternalData == nullptr) return;
-    memcpy(mExternalData, mAudioBuffer.raw, mBufferSize);
+    mMemory->update();
+    if (size > mBufferSize) size = mBufferSize;
+    memcpy(mAudioBuffer.raw, mExternalData, size);
+    mMemory->commit();
+}
+
+void EffectBufferHalHidl::commit(size_t size) {
+    if (mExternalData == nullptr) return;
+    if (size > mBufferSize) size = mBufferSize;
+    memcpy(mExternalData, mAudioBuffer.raw, size);
 }
 
 } // namespace android
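
The size-bounded update()/commit() overloads let a caller mirror only the frames actually in flight rather than the whole allocated buffer; the DownmixerBufferProvider change further down in this patch uses them exactly this way. A condensed sketch of that pattern (names are placeholders, not code from the patch):

#include <media/audiohal/EffectBufferHalInterface.h>
#include <media/audiohal/EffectHalInterface.h>

using namespace android;

// Assumes effect->setInBuffer(in) / setOutBuffer(out) were called at setup,
// as in the DownmixerBufferProvider constructor change below.
void processSketch(const sp<EffectBufferHalInterface>& in,
                   const sp<EffectBufferHalInterface>& out,
                   const sp<EffectHalInterface>& effect,
                   const void* src, void* dst,
                   size_t frames, size_t frameSize) {
    in->setExternalData(const_cast<void*>(src));
    in->setFrameCount(frames);
    in->update(frameSize * frames);      // copy in only what will be processed
    out->setFrameCount(frames);
    out->setExternalData(dst);
    if (effect->process() == OK) {
        out->commit(frameSize * frames); // copy back only the processed bytes
    }
}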
diff --git a/media/libaudiohal/EffectBufferHalHidl.h b/media/libaudiohal/EffectBufferHalHidl.h
index 4c4ec87..6e9fd0b 100644
--- a/media/libaudiohal/EffectBufferHalHidl.h
+++ b/media/libaudiohal/EffectBufferHalHidl.h
@@ -40,6 +40,8 @@
 
     virtual void update();
     virtual void commit();
+    virtual void update(size_t size);
+    virtual void commit(size_t size);
 
     const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
 
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
index 20b1339..9fe2c7b 100644
--- a/media/libaudiohal/EffectBufferHalLocal.cpp
+++ b/media/libaudiohal/EffectBufferHalLocal.cpp
@@ -75,4 +75,10 @@
 void EffectBufferHalLocal::commit() {
 }
 
+void EffectBufferHalLocal::update(size_t) {
+}
+
+void EffectBufferHalLocal::commit(size_t) {
+}
+
 } // namespace android
diff --git a/media/libaudiohal/EffectBufferHalLocal.h b/media/libaudiohal/EffectBufferHalLocal.h
index df7bd43..202d878 100644
--- a/media/libaudiohal/EffectBufferHalLocal.h
+++ b/media/libaudiohal/EffectBufferHalLocal.h
@@ -35,6 +35,8 @@
 
     virtual void update();
     virtual void commit();
+    virtual void update(size_t size);
+    virtual void commit(size_t size);
 
   private:
     friend class EffectBufferHalInterface;
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
index f1f3f2a..d74ef8a 100644
--- a/media/libaudiohal/EffectHalHidl.cpp
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -160,26 +160,30 @@
         mBuffersChanged = false;
         return OK;
     }
-    return ret.isOk() ? analyzeResult(ret) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
         uint32_t *replySize, void *pReplyData) {
     if (mEffect == 0) return NO_INIT;
     hidl_vec<uint8_t> hidlData;
-    hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+    if (pCmdData != nullptr && cmdSize > 0) {
+        hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+    }
     status_t status;
+    uint32_t replySizeStub = 0;
+    if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
     Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
             [&](int32_t s, const hidl_vec<uint8_t>& result) {
                 status = s;
                 if (status == 0) {
                     if (*replySize > result.size()) *replySize = result.size();
-                    if (pReplyData && *replySize > 0) {
+                    if (pReplyData != nullptr && *replySize > 0) {
                         memcpy(pReplyData, &result[0], *replySize);
                     }
                 }
             });
-    return status;
+    return ret.isOk() ? status : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
@@ -192,13 +196,13 @@
                     effectDescriptorToHal(result, pDescriptor);
                 }
             });
-    return ret.isOk() ? analyzeResult(retval) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::close() {
     if (mEffect == 0) return NO_INIT;
     Return<Result> ret = mEffect->close();
-    return ret.isOk() ? analyzeResult(ret) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
 }
 
 } // namespace android
diff --git a/media/libaudiohal/EffectHalLocal.cpp b/media/libaudiohal/EffectHalLocal.cpp
index b4f1934..dd465c3 100644
--- a/media/libaudiohal/EffectHalLocal.cpp
+++ b/media/libaudiohal/EffectHalLocal.cpp
@@ -45,11 +45,21 @@
 }
 
 status_t EffectHalLocal::process() {
+    if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+        ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+        ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+        return NO_INIT;
+    }
     return (*mHandle)->process(mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
 }
 
 status_t EffectHalLocal::processReverse() {
     if ((*mHandle)->process_reverse != NULL) {
+        if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+            ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+            ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+            return NO_INIT;
+        }
         return (*mHandle)->process_reverse(
                 mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
     } else {
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
index 1ab5dad..ad12654 100644
--- a/media/libaudiohal/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -17,7 +17,9 @@
 #define LOG_TAG "EffectsFactoryHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <android/hidl/memory/1.0/IAllocator.h>
 #include <cutils/native_handle.h>
+#include <hidl/ServiceManagement.h>
 #include <media/EffectsFactoryApi.h>
 
 #include "ConversionHelperHidl.h"
@@ -45,6 +47,10 @@
 
 EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory"){
     mEffectsFactory = IEffectsFactory::getService("audio_effects_factory");
+    // TODO: Waiting should not be needed (b/34772726).
+    // Also remove include of IAllocator.h and ServiceManagement.h
+    android::hardware::details::waitForHwService(
+            hidl::memory::V1_0::IAllocator::descriptor, "ashmem");
 }
 
 EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
new file mode 100644
index 0000000..a742671
--- /dev/null
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HalDeathHandler"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+
+namespace android {
+
+ANDROID_SINGLETON_STATIC_INSTANCE(HalDeathHandler);
+
+// static
+sp<HalDeathHandler> HalDeathHandler::getInstance() {
+    return &Singleton<HalDeathHandler>::getInstance();
+}
+
+HalDeathHandler::HalDeathHandler() : mSelf(this) {
+}
+
+HalDeathHandler::~HalDeathHandler() {
+}
+
+void HalDeathHandler::registerAtExitHandler(void* cookie, AtExitHandler handler) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.insert({cookie, handler});
+}
+
+void HalDeathHandler::unregisterAtExitHandler(void* cookie) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.erase(cookie);
+}
+
+void HalDeathHandler::serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) {
+    // No matter which of the service objects has died,
+    // we need to run all the registered handlers and crash our process.
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    for (const auto& handler : mHandlers) {
+        handler.second();
+    }
+    LOG_ALWAYS_FATAL("HAL server crashed, need to restart");
+}
+
+} // namespace android
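
A usage sketch (not part of the new file) of the intended pattern: a HAL proxy is linked to the shared death handler, as DevicesFactoryHalHidl does above, and a client registers a cleanup callback that serviceDied() runs before aborting the process. This assumes AtExitHandler is a std::function-style callable.

#include <media/audiohal/hidl/HalDeathHandler.h>

using namespace android;

template <typename HalInterfaceT>
void attachDeathHandlerSketch(const sp<HalInterfaceT>& hal, void* cookie) {
    // One handler instance serves every audio HAL interface in the process.
    hal->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
    HalDeathHandler::getInstance()->registerAtExitHandler(cookie, []() {
        // Flush or persist state here; the process is restarted afterwards.
    });
}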
diff --git a/media/libaudiohal/StreamHalHidl.cpp b/media/libaudiohal/StreamHalHidl.cpp
index 5943e22..9ee8fa8 100644
--- a/media/libaudiohal/StreamHalHidl.cpp
+++ b/media/libaudiohal/StreamHalHidl.cpp
@@ -242,7 +242,7 @@
 
 StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
         : StreamHalHidl(stream.get()), mStream(stream), mEfGroup(nullptr),
-          mGetPresentationPositionNotSupported(false), mPPosFromWriteObtained(0) {
+          mGetPresentationPositionNotSupported(false), mPPosFromWrite{ 0, OK, 0, { 0, 0 } } {
 }
 
 StreamOutHalHidl::~StreamOutHalHidl() {
@@ -301,19 +301,22 @@
     status_t ret = mEfGroup->wait(
             static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState, NS_PER_SEC);
     if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
-        WriteStatus writeStatus = { Result::NOT_INITIALIZED, 0, 0, { 0, 0 } };
+        WriteStatus writeStatus =
+                { Result::NOT_INITIALIZED, 0, Result::NOT_INITIALIZED, 0, { 0, 0 } };
         mStatusMQ->read(&writeStatus);
-        if (writeStatus.retval == Result::OK) {
+        if (writeStatus.writeRetval == Result::OK) {
             status = OK;
             *written = writeStatus.written;
-            mPPosFromWriteFrames = writeStatus.frames;
-            mPPosFromWriteTS.tv_sec = writeStatus.timeStamp.tvSec;
-            mPPosFromWriteTS.tv_nsec = writeStatus.timeStamp.tvNSec;
-            struct timespec timeNow;
-            clock_gettime(CLOCK_MONOTONIC, &timeNow);
-            mPPosFromWriteObtained = timeNow.tv_sec * 1000000 + timeNow.tv_nsec / 1000;
+            mPPosFromWrite.status = processReturn(
+                    "get_presentation_position", writeStatus.presentationPositionRetval);
+            if (mPPosFromWrite.status == OK) {
+                mPPosFromWrite.frames = writeStatus.frames;
+                mPPosFromWrite.ts.tv_sec = writeStatus.timeStamp.tvSec;
+                mPPosFromWrite.ts.tv_nsec = writeStatus.timeStamp.tvNSec;
+            }
+            mPPosFromWrite.obtained = getCurrentTimeMs();
         } else {
-            status = processReturn("write", writeStatus.retval);
+            status = processReturn("write", writeStatus.writeRetval);
         }
         return status;
     }
@@ -324,6 +327,12 @@
     return ret;
 }
 
+uint64_t StreamOutHalHidl::getCurrentTimeMs() {
+    struct timespec timeNow;
+    clock_gettime(CLOCK_MONOTONIC, &timeNow);
+    return timeNow.tv_sec * 1000000 + timeNow.tv_nsec / 1000;
+}
+
 status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
     std::unique_ptr<DataMQ> tempDataMQ;
     std::unique_ptr<StatusMQ> tempStatusMQ;
@@ -331,8 +340,8 @@
     Return<void> ret = mStream->prepareForWriting(
             1, bufferSize, ThreadPriority(mHalThreadPriority),
             [&](Result r,
-                    const MQDescriptorSync<uint8_t>& dataMQ,
-                    const MQDescriptorSync<WriteStatus>& statusMQ) {
+                    const DataMQ::Descriptor& dataMQ,
+                    const StatusMQ::Descriptor& statusMQ) {
                 retval = r;
                 if (retval == Result::OK) {
                     tempDataMQ.reset(new DataMQ(dataMQ));
@@ -435,15 +444,14 @@
 status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
     if (mStream == 0) return NO_INIT;
     if (mGetPresentationPositionNotSupported) return INVALID_OPERATION;
-    struct timespec timeNow;
-    clock_gettime(CLOCK_MONOTONIC, &timeNow);
-    uint64_t timeStampNow = timeNow.tv_sec * 1000000 + timeNow.tv_nsec / 1000;
-    if (timeStampNow - mPPosFromWriteObtained <= 1000) {
+    if (getCurrentTimeMs() - mPPosFromWrite.obtained <= 1000) {
         // No more than 1 ms passed since the last write, use cached result to avoid binder calls.
-        *frames = mPPosFromWriteFrames;
-        timestamp->tv_sec = mPPosFromWriteTS.tv_sec;
-        timestamp->tv_nsec = mPPosFromWriteTS.tv_nsec;
-        return OK;
+        if (mPPosFromWrite.status == OK) {
+            *frames = mPPosFromWrite.frames;
+            timestamp->tv_sec = mPPosFromWrite.ts.tv_sec;
+            timestamp->tv_nsec = mPPosFromWrite.ts.tv_nsec;
+        }
+        return mPPosFromWrite.status;
     }
 
     Result retval;
@@ -559,8 +567,8 @@
     Return<void> ret = mStream->prepareForReading(
             1, bufferSize, ThreadPriority(mHalThreadPriority),
             [&](Result r,
-                    const MQDescriptorSync<uint8_t>& dataMQ,
-                    const MQDescriptorSync<ReadStatus>& statusMQ) {
+                    const DataMQ::Descriptor& dataMQ,
+                    const StatusMQ::Descriptor& statusMQ) {
                 retval = r;
                 if (retval == Result::OK) {
                     tempDataMQ.reset(new DataMQ(dataMQ));
diff --git a/media/libaudiohal/StreamHalHidl.h b/media/libaudiohal/StreamHalHidl.h
index 0093957..8b5867e 100644
--- a/media/libaudiohal/StreamHalHidl.h
+++ b/media/libaudiohal/StreamHalHidl.h
@@ -164,15 +164,19 @@
     std::unique_ptr<StatusMQ> mStatusMQ;
     EventFlag* mEfGroup;
     bool mGetPresentationPositionNotSupported;
-    uint64_t mPPosFromWriteObtained;
-    uint64_t mPPosFromWriteFrames;
-    struct timespec mPPosFromWriteTS;
+    struct {
+        uint64_t obtained;
+        status_t status;
+        uint64_t frames;
+        struct timespec ts;
+    } mPPosFromWrite;
 
     // Can not be constructed directly by clients.
     StreamOutHalHidl(const sp<IStreamOut>& stream);
 
     virtual ~StreamOutHalHidl();
 
+    uint64_t getCurrentTimeMs();
     status_t prepareForWriting(size_t bufferSize);
 };
 
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
index d47d158..b7ea99e 100644
--- a/media/libaudioprocessing/Android.mk
+++ b/media/libaudioprocessing/Android.mk
@@ -9,6 +9,7 @@
     AudioResamplerSinc.cpp.arm \
     AudioResamplerDyn.cpp.arm \
     BufferProviders.cpp \
+    RecordBufferConverter.cpp \
 
 LOCAL_C_INCLUDES := \
     $(TOP) \
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 11ec367..862fef6 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -142,9 +142,9 @@
             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
             bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
 {
-    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d %d)",
             this, inputChannelMask, outputChannelMask, format,
-            sampleRate, sessionId);
+            sampleRate, sessionId, (int)bufferFrameCount);
     if (!sIsMultichannelCapable) {
         ALOGE("DownmixerBufferProvider() error: not multichannel capable");
         return;
@@ -178,11 +178,13 @@
              EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
      mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
 
+     mInFrameSize =
+             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask);
+     mOutFrameSize =
+             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask);
      status_t status;
      status = EffectBufferHalInterface::mirror(
-             nullptr,
-             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
-             &mInBuffer);
+             nullptr, mInFrameSize * bufferFrameCount, &mInBuffer);
      if (status != 0) {
          ALOGE("DownmixerBufferProvider() error %d while creating input buffer", status);
          mDownmixInterface.clear();
@@ -190,9 +192,7 @@
          return;
      }
      status = EffectBufferHalInterface::mirror(
-             nullptr,
-             audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
-             &mOutBuffer);
+             nullptr, mOutFrameSize * bufferFrameCount, &mOutBuffer);
      if (status != 0) {
          ALOGE("DownmixerBufferProvider() error %d while creating output buffer", status);
          mInBuffer.clear();
@@ -200,6 +200,8 @@
          mEffectsFactory.clear();
          return;
      }
+     mDownmixInterface->setInBuffer(mInBuffer);
+     mDownmixInterface->setOutBuffer(mOutBuffer);
 
      int cmdStatus;
      uint32_t replySize = sizeof(int);
@@ -275,14 +277,18 @@
 {
     mInBuffer->setExternalData(const_cast<void*>(src));
     mInBuffer->setFrameCount(frames);
-    mInBuffer->update();
-    mOutBuffer->setExternalData(dst);
+    mInBuffer->update(mInFrameSize * frames);
     mOutBuffer->setFrameCount(frames);
-    mOutBuffer->update();
+    mOutBuffer->setExternalData(dst);
+    if (dst != src) {
+        // Downmix may be accumulating, need to populate the output buffer
+        // with the dst data.
+        mOutBuffer->update(mOutFrameSize * frames);
+    }
     // may be in-place if src == dst.
     status_t res = mDownmixInterface->process();
     if (res == OK) {
-        mOutBuffer->commit();
+        mOutBuffer->commit(mOutFrameSize * frames);
     } else {
         ALOGE("DownmixBufferProvider error %d", res);
     }
diff --git a/media/libaudioprocessing/RecordBufferConverter.cpp b/media/libaudioprocessing/RecordBufferConverter.cpp
new file mode 100644
index 0000000..54151f5
--- /dev/null
+++ b/media/libaudioprocessing/RecordBufferConverter.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RecordBufferConverter"
+//#define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioMixer.h>  // for UNITY_GAIN_FLOAT
+#include <media/AudioResampler.h>
+#include <media/BufferProviders.h>
+#include <media/RecordBufferConverter.h>
+#include <utils/Log.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+template <typename T>
+static inline T max(const T& a, const T& b)
+{
+    return a > b ? a : b;
+}
+
+namespace android {
+
+RecordBufferConverter::RecordBufferConverter(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate) :
+            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
+            // mSrcFormat
+            // mSrcSampleRate
+            // mDstChannelMask
+            // mDstFormat
+            // mDstSampleRate
+            // mSrcChannelCount
+            // mDstChannelCount
+            // mDstFrameSize
+            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
+            mResampler(NULL),
+            mIsLegacyDownmix(false),
+            mIsLegacyUpmix(false),
+            mRequiresFloat(false),
+            mInputConverterProvider(NULL)
+{
+    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
+            dstChannelMask, dstFormat, dstSampleRate);
+}
+
+RecordBufferConverter::~RecordBufferConverter() {
+    free(mBuf);
+    delete mResampler;
+    delete mInputConverterProvider;
+}
+
+void RecordBufferConverter::reset() {
+    if (mResampler != NULL) {
+        mResampler->reset();
+    }
+}
+
+size_t RecordBufferConverter::convert(void *dst,
+        AudioBufferProvider *provider, size_t frames)
+{
+    if (mInputConverterProvider != NULL) {
+        mInputConverterProvider->setBufferProvider(provider);
+        provider = mInputConverterProvider;
+    }
+
+    if (mResampler == NULL) {
+        ALOGV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                mSrcSampleRate, mSrcFormat, mDstFormat);
+
+        AudioBufferProvider::Buffer buffer;
+        for (size_t i = frames; i > 0; ) {
+            buffer.frameCount = i;
+            status_t status = provider->getNextBuffer(&buffer);
+            if (status != OK || buffer.frameCount == 0) {
+                frames -= i; // cannot fill request.
+                break;
+            }
+            // format convert to destination buffer
+            convertNoResampler(dst, buffer.raw, buffer.frameCount);
+
+            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
+            i -= buffer.frameCount;
+            provider->releaseBuffer(&buffer);
+        }
+    } else {
+         ALOGV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
+
+         // reallocate buffer if needed
+         if (mBufFrameSize != 0 && mBufFrames < frames) {
+             free(mBuf);
+             mBufFrames = frames;
+             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+         }
+        // resampler accumulates, but we only have one source track
+        memset(mBuf, 0, frames * mBufFrameSize);
+        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
+        // format convert to destination buffer
+        convertResampler(dst, mBuf, frames);
+    }
+    return frames;
+}
+
+status_t RecordBufferConverter::updateParameters(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate)
+{
+    // quick evaluation if there is any change.
+    if (mSrcFormat == srcFormat
+            && mSrcChannelMask == srcChannelMask
+            && mSrcSampleRate == srcSampleRate
+            && mDstFormat == dstFormat
+            && mDstChannelMask == dstChannelMask
+            && mDstSampleRate == dstSampleRate) {
+        return NO_ERROR;
+    }
+
+    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
+            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
+            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
+    const bool valid =
+            audio_is_input_channel(srcChannelMask)
+            && audio_is_input_channel(dstChannelMask)
+            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
+            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
+            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
+            ; // no upsampling checks for now
+    if (!valid) {
+        return BAD_VALUE;
+    }
+
+    mSrcFormat = srcFormat;
+    mSrcChannelMask = srcChannelMask;
+    mSrcSampleRate = srcSampleRate;
+    mDstFormat = dstFormat;
+    mDstChannelMask = dstChannelMask;
+    mDstSampleRate = dstSampleRate;
+
+    // compute derived parameters
+    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
+    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
+    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
+
+    // do we need to resample?
+    delete mResampler;
+    mResampler = NULL;
+    if (mSrcSampleRate != mDstSampleRate) {
+        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
+                mSrcChannelCount, mDstSampleRate);
+        mResampler->setSampleRate(mSrcSampleRate);
+        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
+    }
+
+    // are we running legacy channel conversion modes?
+    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
+                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
+    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
+                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
+
+    // do we need to process in float?
+    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
+
+    // do we need a staging buffer to convert for destination (we can still optimize this)?
+    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
+    if (mResampler != NULL) {
+        mBufFrameSize = max(mSrcChannelCount, (uint32_t)FCC_2)
+                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
+    } else {
+        mBufFrameSize = 0;
+    }
+    mBufFrames = 0; // force the buffer to be resized.
+
+    // do we need an input converter buffer provider to give us float?
+    delete mInputConverterProvider;
+    mInputConverterProvider = NULL;
+    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
+        mInputConverterProvider = new ReformatBufferProvider(
+                audio_channel_count_from_in_mask(mSrcChannelMask),
+                mSrcFormat,
+                AUDIO_FORMAT_PCM_FLOAT,
+                256 /* provider buffer frame count */);
+    }
+
+    // do we need a remixer to do channel mask conversion
+    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
+        (void) memcpy_by_index_array_initialization_from_channel_mask(
+                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
+    }
+    return NO_ERROR;
+}
+
+void RecordBufferConverter::convertNoResampler(
+        void *dst, const void *src, size_t frames)
+{
+    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
+    if (mBufFrameSize != 0 && mBufFrames < frames) {
+        free(mBuf);
+        mBufFrames = frames;
+        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+    }
+    // do we need to do legacy upmix and downmix?
+    if (mIsLegacyUpmix || mIsLegacyDownmix) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        if (mIsLegacyUpmix) {
+            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
+                    (const float *)src, frames);
+        } else /*mIsLegacyDownmix */ {
+            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
+                    (const float *)src, frames);
+        }
+        if (mBuf != NULL) {
+            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mDstChannelCount);
+        }
+        return;
+    }
+    // do we need to do channel mask conversion?
+    if (mSrcChannelMask != mDstChannelMask) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        memcpy_by_index_array(dstBuf, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
+        if (dstBuf == dst) {
+            return; // format is the same
+        }
+    }
+    // convert to destination buffer
+    const void *convertBuf = mBuf != NULL ? mBuf : src;
+    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
+            frames * mDstChannelCount);
+}
+
+void RecordBufferConverter::convertResampler(
+        void *dst, /*not-a-const*/ void *src, size_t frames)
+{
+    // src buffer format is ALWAYS float when entering this routine
+    if (mIsLegacyUpmix) {
+        ; // mono to stereo already handled by resampler
+    } else if (mIsLegacyDownmix
+            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
+        // the resampler outputs stereo for mono input channel (a feature?)
+        // must convert to mono
+        downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+    } else if (mSrcChannelMask != mDstChannelMask) {
+        // convert to mono channel again for channel mask conversion (could be skipped
+        // with further optimization).
+        if (mSrcChannelCount == 1) {
+            downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+        }
+        // convert to destination format (in place, OK as float is larger than other types)
+        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
+            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mSrcChannelCount);
+        }
+        // channel convert and save to dst
+        memcpy_by_index_array(dst, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
+        return;
+    }
+    // convert to destination format and save to dst
+    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+            frames * mDstChannelCount);
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
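
A short usage sketch (not part of the new file): the converter pulls source frames from an AudioBufferProvider and writes them to dst in the destination format and rate; dst, provider, and the frame count are caller-provided placeholders here, and parameters can be changed later with updateParameters().

#include <media/AudioBufferProvider.h>
#include <media/RecordBufferConverter.h>

using namespace android;

size_t convertOneBurstSketch(void* dst, AudioBufferProvider* provider, size_t dstFrames) {
    RecordBufferConverter converter(
            AUDIO_CHANNEL_IN_STEREO, AUDIO_FORMAT_PCM_16_BIT, 48000 /*srcRate*/,
            AUDIO_CHANNEL_IN_MONO,   AUDIO_FORMAT_PCM_16_BIT, 16000 /*dstRate*/);
    // Invalid combinations are rejected by updateParameters() with BAD_VALUE;
    // the constructor forwards to it. The return value may be less than
    // dstFrames if the provider could not fill the request.
    return converter.convert(dst, provider, dstFrames);
}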
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
index 8d5e016..a23c000 100644
--- a/media/libaudioprocessing/tests/resampler_tests.cpp
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -32,8 +32,8 @@
 #include <utility>
 #include <vector>
 
-#include <android/log.h>
 #include <gtest/gtest.h>
+#include <log/log.h>
 #include <media/AudioBufferProvider.h>
 
 #include <media/AudioResampler.h>
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index 19d408d..9d29cf1 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -19,12 +19,13 @@
 
 #include <assert.h>
 #include <math.h>
-#include <new>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
 
-#include <android/log.h>
+#include <new>
+
+#include <log/log.h>
 
 #include <audio_effects/effect_loudnessenhancer.h>
 #include "dsp/core/dynamic_range_compression.h"
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index f5e11a6..db4d009 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -22,9 +22,10 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
+
 #include <new>
 
-#include <android/log.h>
+#include <log/log.h>
 
 #include "AudioEqualizer.h"
 #include "AudioBiquadFilter.h"
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index 08bf9ae..fce9bed 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -21,7 +21,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <android/log.h>
+#include <log/log.h>
 
 #include "EffectReverb.h"
 #include "EffectsMath.h"
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 8fff414..6c6c369 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -57,10 +57,11 @@
     StringArray.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
-	libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
+        libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
         libcamera_client libstagefright_foundation \
         libgui libdl libaudioutils libaudioclient \
-        libmedia_helper
+        libmedia_helper \
+        libhidlbase \
 
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libsonivox
 
@@ -72,6 +73,7 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
 LOCAL_C_INCLUDES := \
+    $(TOP)/system/libhidl/base/include \
     $(TOP)/frameworks/native/include/media/openmax \
     $(TOP)/frameworks/av/include/media/ \
     $(TOP)/frameworks/av/media/libstagefright \
@@ -83,7 +85,8 @@
     frameworks/av/media/libmedia/aidl
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
index 6dc4a53..a69497e 100644
--- a/media/libmedia/BufferingSettings.cpp
+++ b/media/libmedia/BufferingSettings.cpp
@@ -28,6 +28,16 @@
     return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
 }
 
+// static
+bool BufferingSettings::IsTimeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_TIME_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+// static
+bool BufferingSettings::IsSizeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_SIZE_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
 BufferingSettings::BufferingSettings()
         : mInitialBufferingMode(BUFFERING_MODE_NONE),
           mRebufferingMode(BUFFERING_MODE_NONE),
@@ -70,4 +80,15 @@
     return OK;
 }
 
+String8 BufferingSettings::toString() const {
+    String8 s;
+    s.appendFormat("initialMode(%d), rebufferingMode(%d), "
+            "initialMarks(%d ms, %d KB), rebufferingMarks(%d, %d)ms, (%d, %d)KB",
+            mInitialBufferingMode, mRebufferingMode,
+            mInitialWatermarkMs, mInitialWatermarkKB,
+            mRebufferingWatermarkLowMs, mRebufferingWatermarkHighMs,
+            mRebufferingWatermarkLowKB, mRebufferingWatermarkHighKB);
+    return s;
+}
+
 } // namespace android
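
The new classification helpers make watermark validation straightforward; a sketch (not from this patch) of how a caller might use them, based on the fields printed by toString() above:

#include <media/BufferingSettings.h>

using namespace android;

status_t checkRebufferMarksSketch(const BufferingSettings& s) {
    if (BufferingSettings::IsTimeBasedBufferingMode(s.mRebufferingMode)
            && s.mRebufferingWatermarkLowMs > s.mRebufferingWatermarkHighMs) {
        return BAD_VALUE;   // the low time mark must not exceed the high mark
    }
    if (BufferingSettings::IsSizeBasedBufferingMode(s.mRebufferingMode)
            && s.mRebufferingWatermarkLowKB > s.mRebufferingWatermarkHighKB) {
        return BAD_VALUE;   // same constraint for the size-based marks
    }
    return OK;
}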
diff --git a/media/libmedia/IMediaAnalyticsService.cpp b/media/libmedia/IMediaAnalyticsService.cpp
index cc4aa35..340cf19 100644
--- a/media/libmedia/IMediaAnalyticsService.cpp
+++ b/media/libmedia/IMediaAnalyticsService.cpp
@@ -50,7 +50,6 @@
 enum {
     GENERATE_UNIQUE_SESSIONID = IBinder::FIRST_CALL_TRANSACTION,
     SUBMIT_ITEM,
-    GET_ITEM_LIST,
 };
 
 class BpMediaAnalyticsService: public BpInterface<IMediaAnalyticsService>
@@ -115,45 +114,6 @@
         return sessionid;
     }
 
-    virtual List<MediaAnalyticsItem*> *getMediaAnalyticsItemList(bool finished, nsecs_t ts)
-    {
-            return getMediaAnalyticsItemList(finished, ts, MediaAnalyticsItem::kKeyAny);
-    }
-
-    virtual List<MediaAnalyticsItem*> *getMediaAnalyticsItemList(bool finished, nsecs_t ts, MediaAnalyticsItem::Key key)
-    {
-        Parcel data, reply;
-        status_t err;
-
-        data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
-        data.writeInt32(finished);
-        data.writeInt64(ts);
-        const char *str = key.c_str();
-        if (key.empty()) {
-            str = MediaAnalyticsItem::kKeyNone.c_str();
-        }
-        data.writeCString(str);
-        err = remote()->transact(GET_ITEM_LIST, data, &reply);
-        if (err != NO_ERROR) {
-            return NULL;
-        }
-
-        // read a count
-        int32_t count = reply.readInt32();
-        List<MediaAnalyticsItem*> *list = NULL;
-
-        if (count > 0) {
-            list = new List<MediaAnalyticsItem*>();
-            for (int i=0;i<count;i++) {
-                MediaAnalyticsItem *item = new MediaAnalyticsItem();
-                // XXX: watch for failures here
-                item->readFromParcel(reply);
-                list->push_back(item);
-            }
-        }
-
-        return list;
-    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaAnalyticsService, "android.media.IMediaAnalyticsService");
@@ -204,33 +164,6 @@
             return NO_ERROR;
         } break;
 
-        case GET_ITEM_LIST: {
-            CHECK_INTERFACE(IMediaPlayerService, data, reply);
-            // get the parameters
-            bool finished = data.readInt32();
-            nsecs_t ts = data.readInt64();
-            MediaAnalyticsItem::Key key = data.readCString();
-
-            // find the (0 or more) items
-            List<MediaAnalyticsItem*> *list =  getMediaAnalyticsItemList(finished, ts, key);
-            // encapsulate/serialize them
-            reply->writeInt32(list->size());
-            if (list->size() > 0) {
-                    for (List<MediaAnalyticsItem*>::iterator it = list->begin();
-                         it != list->end(); it++) {
-                            (*it)->writeToParcel(reply);
-                    }
-
-
-            }
-
-            // avoid leakiness; organized discarding of list and its contents
-            list->clear();
-            delete list;
-
-            return NO_ERROR;
-        } break;
-
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 9ffde4e..966267a 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -70,8 +70,28 @@
     SET_RETRANSMIT_ENDPOINT,
     GET_RETRANSMIT_ENDPOINT,
     SET_NEXT_PLAYER,
+    // ModDrm
+    PREPARE_DRM,
+    RELEASE_DRM,
+    GET_KEY_REQUEST,
+    PROVIDE_KEY_RESPONSE,
+    RESTORE_KEYS,
+    GET_DRM_PROPERTY_STRING,
+    SET_DRM_PROPERTY_STRING,
 };
 
+// ModDrm helpers
+static void readVector(const Parcel& reply, Vector<uint8_t>& vector) {
+    uint32_t size = reply.readUint32();
+    vector.insertAt((size_t)0, size);
+    reply.read(vector.editArray(), size);
+}
+
+static void writeVector(Parcel& data, Vector<uint8_t> const& vector) {
+    data.writeUint32(vector.size());
+    data.write(vector.array(), vector.size());
+}
+
 class BpMediaPlayer: public BpInterface<IMediaPlayer>
 {
 public:
@@ -447,6 +467,137 @@
 
         return err;
     }
+
+    // ModDrm
+    status_t prepareDrm(const uint8_t uuid[16], const int mode)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        data.write(uuid, 16);
+        data.writeInt32(mode);
+
+        status_t status = remote()->transact(PREPARE_DRM, data, &reply);
+        if (status != OK) {
+            ALOGE("prepareDrm: binder call failed: %d", status);
+            return status;
+        }
+
+        return reply.readInt32();
+    }
+
+    status_t releaseDrm()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        status_t status = remote()->transact(RELEASE_DRM, data, &reply);
+        if (status != OK) {
+            ALOGE("releaseDrm: binder call failed: %d", status);
+            return status;
+        }
+
+        return reply.readInt32();
+    }
+
+    status_t getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
+            DrmPlugin::KeyType keyType, KeyedVector<String8, String8>& optionalParameters,
+            Vector<uint8_t>& request, String8& defaultUrl,
+            DrmPlugin::KeyRequestType& keyRequestType)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        writeVector(data, scope);
+        data.writeString8(mimeType);
+        data.writeInt32((int32_t)keyType);
+
+        data.writeUint32(optionalParameters.size());
+        for (size_t i = 0; i < optionalParameters.size(); ++i) {
+            data.writeString8(optionalParameters.keyAt(i));
+            data.writeString8(optionalParameters.valueAt(i));
+        }
+
+        status_t status = remote()->transact(GET_KEY_REQUEST, data, &reply);
+        if (status != OK) {
+            ALOGE("getKeyRequest: binder call failed: %d", status);
+            return status;
+        }
+
+        readVector(reply, request);
+        defaultUrl = reply.readString8();
+        keyRequestType = (DrmPlugin::KeyRequestType)reply.readInt32();
+
+        return reply.readInt32();
+    }
+
+    status_t provideKeyResponse(Vector<uint8_t>& releaseKeySetId, Vector<uint8_t>& response,
+            Vector<uint8_t> &keySetId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        writeVector(data, releaseKeySetId);
+        writeVector(data, response);
+
+        status_t status = remote()->transact(PROVIDE_KEY_RESPONSE, data, &reply);
+        if (status != OK) {
+            ALOGE("provideKeyResponse: binder call failed: %d", status);
+            return status;
+        }
+
+        readVector(reply, keySetId);
+
+        return reply.readInt32();
+    }
+
+    status_t restoreKeys(Vector<uint8_t> const& keySetId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        writeVector(data, keySetId);
+
+        status_t status = remote()->transact(RESTORE_KEYS, data, &reply);
+        if (status != OK) {
+            ALOGE("restoreKeys: binder call failed: %d", status);
+            return status;
+        }
+
+        return reply.readInt32();
+    }
+
+    status_t getDrmPropertyString(String8 const& name, String8& value)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        data.writeString8(name);
+        status_t status = remote()->transact(GET_DRM_PROPERTY_STRING, data, &reply);
+        if (status != OK) {
+            ALOGE("getDrmPropertyString: binder call failed: %d", status);
+            return status;
+        }
+
+        value = reply.readString8();
+        return reply.readInt32();
+    }
+
+    status_t setDrmPropertyString(String8 const& name, String8 const& value)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+        data.writeString8(name);
+        data.writeString8(value);
+        status_t status = remote()->transact(SET_DRM_PROPERTY_STRING, data, &reply);
+        if (status != OK) {
+            ALOGE("setDrmPropertyString: binder call failed: %d", status);
+            return status;
+        }
+
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -741,6 +892,93 @@
 
             return NO_ERROR;
         } break;
+
+        // ModDrm
+        case PREPARE_DRM: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            uint8_t uuid[16];
+            data.read(uuid, sizeof(uuid));
+
+            int mode = data.readInt32();
+
+            uint32_t result = prepareDrm(uuid, mode);
+            reply->writeInt32(result);
+            return OK;
+        }
+        case RELEASE_DRM: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+            uint32_t result = releaseDrm();
+            reply->writeInt32(result);
+            return OK;
+        }
+        case GET_KEY_REQUEST: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+            Vector<uint8_t> scope;
+            readVector(data, scope);
+            String8 mimeType = data.readString8();
+            DrmPlugin::KeyType keyType = (DrmPlugin::KeyType)data.readInt32();
+
+            KeyedVector<String8, String8> optionalParameters;
+            uint32_t count = data.readUint32();
+            for (size_t i = 0; i < count; ++i) {
+                String8 key, value;
+                key = data.readString8();
+                value = data.readString8();
+                optionalParameters.add(key, value);
+            }
+
+            Vector<uint8_t> request;
+            String8 defaultUrl;
+            DrmPlugin::KeyRequestType keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
+
+            status_t result = getKeyRequest(scope, mimeType, keyType, optionalParameters,
+                                      request, defaultUrl, keyRequestType);
+
+            writeVector(*reply, request);
+            reply->writeString8(defaultUrl);
+            reply->writeInt32(keyRequestType);
+            reply->writeInt32(result);
+            return OK;
+        }
+        case PROVIDE_KEY_RESPONSE: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            Vector<uint8_t> releaseKeySetId, response, keySetId;
+            readVector(data, releaseKeySetId);
+            readVector(data, response);
+            uint32_t result = provideKeyResponse(releaseKeySetId, response, keySetId);
+            writeVector(*reply, keySetId);
+            reply->writeInt32(result);
+            return OK;
+        }
+        case RESTORE_KEYS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+            Vector<uint8_t> keySetId;
+            readVector(data, keySetId);
+            uint32_t result = restoreKeys(keySetId);
+            reply->writeInt32(result);
+            return OK;
+        }
+        case GET_DRM_PROPERTY_STRING: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            String8 name, value;
+            name = data.readString8();
+            uint32_t result = getDrmPropertyString(name, value);
+            reply->writeString8(value);
+            reply->writeInt32(result);
+            return OK;
+        }
+        case SET_DRM_PROPERTY_STRING: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            String8 name, value;
+            name = data.readString8();
+            value = data.readString8();
+            uint32_t result = setDrmPropertyString(name, value);
+            reply->writeInt32(result);
+            return OK;
+        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 5599830..5730c76 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -50,6 +50,7 @@
     SET_VIDEO_ENCODER,
     SET_AUDIO_ENCODER,
     SET_OUTPUT_FILE_FD,
+    SET_NEXT_OUTPUT_FILE_FD,
     SET_VIDEO_SIZE,
     SET_VIDEO_FRAMERATE,
     SET_PARAMETERS,
@@ -172,17 +173,24 @@
         return reply.readInt32();
     }
 
-    status_t setOutputFile(int fd, int64_t offset, int64_t length) {
-        ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
+    status_t setOutputFile(int fd) {
+        ALOGV("setOutputFile(%d)", fd);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
         data.writeFileDescriptor(fd);
-        data.writeInt64(offset);
-        data.writeInt64(length);
         remote()->transact(SET_OUTPUT_FILE_FD, data, &reply);
         return reply.readInt32();
     }
 
+    status_t setNextOutputFile(int fd) {
+        ALOGV("setNextOutputFile(%d)", fd);
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        data.writeFileDescriptor(fd);
+        remote()->transact(SET_NEXT_OUTPUT_FILE_FD, data, &reply);
+        return reply.readInt32();
+    }
+
     status_t setVideoSize(int width, int height)
     {
         ALOGV("setVideoSize(%dx%d)", width, height);
@@ -429,9 +437,15 @@
             ALOGV("SET_OUTPUT_FILE_FD");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
             int fd = dup(data.readFileDescriptor());
-            int64_t offset = data.readInt64();
-            int64_t length = data.readInt64();
-            reply->writeInt32(setOutputFile(fd, offset, length));
+            reply->writeInt32(setOutputFile(fd));
+            ::close(fd);
+            return NO_ERROR;
+        } break;
+        case SET_NEXT_OUTPUT_FILE_FD: {
+            ALOGV("SET_NEXT_OUTPUT_FILE_FD");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            int fd = dup(data.readFileDescriptor());
+            reply->writeInt32(setNextOutputFile(fd));
             ::close(fd);
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/MediaAnalyticsItem.cpp b/media/libmedia/MediaAnalyticsItem.cpp
index 76397c7..229e747 100644
--- a/media/libmedia/MediaAnalyticsItem.cpp
+++ b/media/libmedia/MediaAnalyticsItem.cpp
@@ -49,15 +49,15 @@
 const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyAny  = "any";
 const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyNone  = "none";
 
-const char * const MediaAnalyticsItem::EnabledProperty  = "media.analytics.enabled";
-const char * const MediaAnalyticsItem::EnabledPropertyPersist  = "persist.media.analytics.enabled";
-const int MediaAnalyticsItem::EnabledProperty_default  = 0;
+const char * const MediaAnalyticsItem::EnabledProperty  = "media.metrics.enabled";
+const char * const MediaAnalyticsItem::EnabledPropertyPersist  = "persist.media.metrics.enabled";
+const int MediaAnalyticsItem::EnabledProperty_default  = 1;
 
 
 // access functions for the class
 MediaAnalyticsItem::MediaAnalyticsItem()
-    : mPid(0),
-      mUid(0),
+    : mPid(-1),
+      mUid(-1),
       mSessionID(MediaAnalyticsItem::SessionIDNone),
       mTimestamp(0),
       mFinalized(0),
@@ -67,8 +67,8 @@
 }
 
 MediaAnalyticsItem::MediaAnalyticsItem(MediaAnalyticsItem::Key key)
-    : mPid(0),
-      mUid(0),
+    : mPid(-1),
+      mUid(-1),
       mSessionID(MediaAnalyticsItem::SessionIDNone),
       mTimestamp(0),
       mFinalized(0),
@@ -92,6 +92,9 @@
     // clean allocated storage from key
     mKey.clear();
 
+    // clean various major parameters
+    mSessionID = MediaAnalyticsItem::SessionIDNone;
+
     // clean attributes
     // contents of the attributes
     for (size_t i = 0 ; i < mPropSize; i++ ) {
@@ -646,7 +649,6 @@
 }
 
 
-
 AString MediaAnalyticsItem::toString() {
 
     AString result = "(";
@@ -763,7 +765,7 @@
 
 //static
 sp<IMediaAnalyticsService> MediaAnalyticsItem::getInstance() {
-    static const char *servicename = "media.analytics";
+    static const char *servicename = "media.metrics";
     static int tries_remaining = SVC_TRIES;
     int enabled = isEnabled();
 
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
index 914cd5b..71d2908 100644
--- a/media/libmedia/OMXBuffer.cpp
+++ b/media/libmedia/OMXBuffer.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "OMXBuffer"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/MediaCodecBuffer.h>
 #include <media/OMXBuffer.h>
 #include <binder/IMemory.h>
@@ -35,11 +36,13 @@
 
 OMXBuffer::OMXBuffer(const sp<MediaCodecBuffer>& codecBuffer)
     : mBufferType(kBufferTypePreset),
+      mRangeOffset(codecBuffer != NULL ? codecBuffer->offset() : 0),
       mRangeLength(codecBuffer != NULL ? codecBuffer->size() : 0) {
 }
 
-OMXBuffer::OMXBuffer(OMX_U32 rangeLength)
+OMXBuffer::OMXBuffer(OMX_U32 rangeOffset, OMX_U32 rangeLength)
     : mBufferType(kBufferTypePreset),
+      mRangeOffset(rangeOffset),
       mRangeLength(rangeLength) {
 }
 
@@ -58,15 +61,25 @@
       mNativeHandle(handle) {
 }
 
+OMXBuffer::OMXBuffer(const hidl_memory &hidlMemory)
+    : mBufferType(kBufferTypeHidlMemory),
+      mHidlMemory(hidlMemory) {
+}
+
 OMXBuffer::~OMXBuffer() {
 }
 
 status_t OMXBuffer::writeToParcel(Parcel *parcel) const {
+    CHECK(mBufferType != kBufferTypeHidlMemory);
     parcel->writeInt32(mBufferType);
 
     switch(mBufferType) {
         case kBufferTypePreset:
         {
+            status_t err = parcel->writeUint32(mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
             return parcel->writeUint32(mRangeLength);
         }
 
@@ -93,11 +106,19 @@
 
 status_t OMXBuffer::readFromParcel(const Parcel *parcel) {
     BufferType bufferType = (BufferType) parcel->readInt32();
+    CHECK(bufferType != kBufferTypeHidlMemory);
 
     switch(bufferType) {
         case kBufferTypePreset:
         {
-            mRangeLength = parcel->readUint32();
+            status_t err = parcel->readUint32(&mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
+            err = parcel->readUint32(&mRangeLength);
+            if (err != OK) {
+                return err;
+            }
             break;
         }
 
@@ -140,10 +161,12 @@
 
 OMXBuffer& OMXBuffer::operator=(OMXBuffer&& source) {
     mBufferType = std::move(source.mBufferType);
+    mRangeOffset = std::move(source.mRangeOffset);
     mRangeLength = std::move(source.mRangeLength);
     mMem = std::move(source.mMem);
     mGraphicBuffer = std::move(source.mGraphicBuffer);
     mNativeHandle = std::move(source.mNativeHandle);
+    mHidlMemory = std::move(source.mHidlMemory);
     return *this;
 }
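
Editor's note: with mRangeOffset now serialized for preset buffers, a parcel round trip carries both the offset and the length. A minimal in-process sketch (illustrative only; the roundTripPresetBuffer helper is not framework code, and real callers go through the OMX binder/HIDL path):

    #include <binder/Parcel.h>
    #include <media/OMXBuffer.h>

    // Sketch: serialize a preset OMXBuffer (offset + length) and read it back.
    static void roundTripPresetBuffer() {
        android::OMXBuffer src(128 /* rangeOffset */, 4096 /* rangeLength */);
        android::Parcel parcel;
        src.writeToParcel(&parcel);

        parcel.setDataPosition(0);
        android::OMXBuffer dst(0, 0);
        dst.readFromParcel(&parcel);   // dst now carries offset 128 and length 4096
    }
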
 
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index c2b2688..8ce2b9f 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -186,6 +186,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_SBC),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_HD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC4),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LDAC),
     TERMINATOR
 };
@@ -211,6 +212,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_6),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
@@ -269,6 +271,48 @@
     TERMINATOR
 };
 
+template <>
+const UsageTypeConverter::Table UsageTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_UNKNOWN),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MEDIA),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ALARM),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_EVENT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_CNT),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MAX),
+    TERMINATOR
+};
+
+template <>
+const SourceTypeConverter::Table SourceTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_DEFAULT),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_UPLINK),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_DOWNLINK),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CAMCORDER),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_RECOGNITION),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_COMMUNICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_REMOTE_SUBMIX),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_UNPROCESSED),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CNT),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MAX),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_FM_TUNER),
+    MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_HOTWORD),
+    TERMINATOR
+};
+
 template class TypeConverter<OutputDeviceTraits>;
 template class TypeConverter<InputDeviceTraits>;
 template class TypeConverter<OutputFlagTraits>;
@@ -280,6 +324,8 @@
 template class TypeConverter<GainModeTraits>;
 template class TypeConverter<StreamTraits>;
 template class TypeConverter<AudioModeTraits>;
+template class TypeConverter<UsageTraits>;
+template class TypeConverter<SourceTraits>;
 
 bool deviceFromString(const std::string& literalDevice, audio_devices_t& device) {
     return InputDeviceConverter::fromString(literalDevice, device) ||
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 699172b..2feb035 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -36,6 +36,7 @@
 #include <media/AudioSystem.h>
 #include <media/AVSyncSettings.h>
 #include <media/IDataSource.h>
+#include <media/MediaAnalyticsItem.h>
 
 #include <binder/MemoryBase.h>
 
@@ -136,8 +137,10 @@
         mPlayer = player;
         if (player != 0) {
             mCurrentState = MEDIA_PLAYER_INITIALIZED;
+            player->getDefaultBufferingSettings(&mCurrentBufferingSettings);
             err = NO_ERROR;
         } else {
+            mCurrentBufferingSettings = BufferingSettings();
             ALOGE("Unable to create media player");
         }
     }
@@ -244,6 +247,44 @@
     return mPlayer->setVideoSurfaceTexture(bufferProducer);
 }
 
+status_t MediaPlayer::getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+    ALOGV("getDefaultBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t MediaPlayer::getBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+    ALOGV("getBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    *buffering = mCurrentBufferingSettings;
+    return NO_ERROR;
+}
+
+status_t MediaPlayer::setBufferingSettings(const BufferingSettings& buffering)
+{
+    ALOGV("setBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    status_t err = mPlayer->setBufferingSettings(buffering);
+    if (err == NO_ERROR) {
+        mCurrentBufferingSettings = buffering;
+    }
+    return err;
+}
+
 // must call with lock held
 status_t MediaPlayer::prepareAsync_l()
 {
@@ -584,6 +625,7 @@
         // setDataSource has to be called again to create a
         // new mediaplayer.
         mPlayer = 0;
+        mCurrentBufferingSettings = BufferingSettings();
         return ret;
     }
     clear_l();
@@ -769,7 +811,11 @@
     ALOGV("MediaPlayer::getParameter(%d)", key);
     Mutex::Autolock _l(mLock);
     if (mPlayer != NULL) {
-        return  mPlayer->getParameter(key, reply);
+        status_t status = mPlayer->getParameter(key, reply);
+        if (status != OK) {
+            ALOGD("getParameter returns %d", status);
+        }
+        return status;
     }
     ALOGV("getParameter: no active player");
     return INVALID_OPERATION;
@@ -833,7 +879,7 @@
     case MEDIA_NOP: // interface test message
         break;
     case MEDIA_PREPARED:
-        ALOGV("prepared");
+        ALOGV("MediaPlayer::notify() prepared");
         mCurrentState = MEDIA_PLAYER_PREPARED;
         if (mPrepareSync) {
             ALOGV("signal application thread");
@@ -842,6 +888,9 @@
             mSignal.signal();
         }
         break;
+    case MEDIA_DRM_INFO:
+        ALOGV("MediaPlayer::notify() MEDIA_DRM_INFO(%d, %d, %d, %p)", msg, ext1, ext2, obj);
+        break;
     case MEDIA_PLAYBACK_COMPLETE:
         ALOGV("playback complete");
         if (mCurrentState == MEDIA_PLAYER_IDLE) {
@@ -942,4 +991,123 @@
     return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
 }
 
+// ModDrm
+status_t MediaPlayer::prepareDrm(const uint8_t uuid[16], const int mode)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Only allowing it in player's prepared state
+    if (!(mCurrentState & MEDIA_PLAYER_PREPARED)) {
+        ALOGE("prepareDrm must only be called in the prepared state.");
+        return INVALID_OPERATION;
+    }
+
+    status_t ret = mPlayer->prepareDrm(uuid, mode);
+    ALOGV("prepareDrm: ret=%d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayer::releaseDrm()
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not allowing releaseDrm in an active state
+    if (mCurrentState & (MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED)) {
+        ALOGE("releaseDrm can not be called in the started/paused state.");
+        return INVALID_OPERATION;
+    }
+
+    status_t ret = mPlayer->releaseDrm();
+    ALOGV("releaseDrm: ret=%d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayer::getKeyRequest(Vector<uint8_t> const& scope, String8 const& mimeType,
+                              DrmPlugin::KeyType keyType,
+                              KeyedVector<String8, String8>& optionalParameters,
+                              Vector<uint8_t>& request, String8& defaultUrl,
+                              DrmPlugin::KeyRequestType& keyRequestType)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not enforcing a particular state beyond the checks enforced by the Java layer
+    // Key exchange can happen after the start.
+    status_t ret = mPlayer->getKeyRequest(scope, mimeType, keyType, optionalParameters,
+                                     request, defaultUrl, keyRequestType);
+    ALOGV("getKeyRequest ret=%d  %d %s %d ", ret,
+          (int)request.size(), defaultUrl.string(), (int)keyRequestType);
+
+    return ret;
+}
+
+status_t MediaPlayer::provideKeyResponse(Vector<uint8_t>& releaseKeySetId,
+                              Vector<uint8_t>& response, Vector<uint8_t>& keySetId)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not enforcing a particular state beyond the checks enforced by the Java layer
+    // Key exchange can happen after the start.
+    status_t ret = mPlayer->provideKeyResponse(releaseKeySetId, response, keySetId);
+    ALOGV("provideKeyResponse: ret=%d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayer::restoreKeys(Vector<uint8_t> const& keySetId)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not enforcing a particular state beyond the checks enforced by the Java layer
+    // Key exchange can happen after the start.
+    status_t ret = mPlayer->restoreKeys(keySetId);
+    ALOGV("restoreKeys: ret=%d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayer::getDrmPropertyString(String8 const& name, String8& value)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not enforcing a particular state beyond the checks enforced by the Java layer
+    status_t ret = mPlayer->getDrmPropertyString(name, value);
+    ALOGV("getDrmPropertyString: ret=%d", ret);
+
+    return ret;
+}
+
+status_t MediaPlayer::setDrmPropertyString(String8 const& name, String8 const& value)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+
+    // Not enforcing a particular state beyond the checks enforced by the Java layer
+    status_t ret = mPlayer->setDrmPropertyString(name, value);
+    ALOGV("setDrmPropertyString: ret=%d", ret);
+
+    return ret;
+}
+
 } // namespace android
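
Editor's note: the DRM entry points added above are state-gated -- prepareDrm() is only accepted once the player is prepared, and releaseDrm() is refused while started or paused. A hedged sketch of the intended key-exchange order, using the signatures from this patch; acquireStreamingKeys and the license-server step are illustrative, not framework API:

    #include <media/mediaplayer.h>

    using namespace android;

    // Sketch only: `player` is assumed to have reached MEDIA_PLAYER_PREPARED and
    // `response` to already hold the license server's reply to `request`.
    static status_t acquireStreamingKeys(const sp<MediaPlayer>& player,
                                         const uint8_t uuid[16],
                                         Vector<uint8_t>& response) {
        status_t err = player->prepareDrm(uuid, 0 /* mode */);
        if (err != NO_ERROR) {
            return err;
        }

        Vector<uint8_t> scope, request, keySetId, releaseKeySetId;
        String8 defaultUrl;
        DrmPlugin::KeyRequestType keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
        KeyedVector<String8, String8> optionalParameters;
        err = player->getKeyRequest(scope, String8("video/mp4"),
                DrmPlugin::kKeyType_Streaming, optionalParameters,
                request, defaultUrl, keyRequestType);
        if (err != NO_ERROR) {
            return err;
        }

        // `request` would normally be posted to `defaultUrl` here.
        return player->provideKeyResponse(releaseKeySetId, response, keySetId);
    }
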
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 6eb208c..dbe4b3b 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -256,6 +256,7 @@
         return INVALID_OPERATION;
     }
 
+
     status_t ret = mMediaRecorder->setAudioEncoder(ae);
     if (OK != ret) {
         ALOGV("setAudioEncoder failed: %d", ret);
@@ -266,9 +267,9 @@
     return ret;
 }
 
-status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
+status_t MediaRecorder::setOutputFile(int fd)
 {
-    ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
+    ALOGV("setOutputFile(%d)", fd);
     if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
@@ -288,14 +289,19 @@
     // the invalid file descritpor never gets invoked. This is to workaround
     // this issue by checking the file descriptor first before passing
     // it through binder call.
-    if (fd < 0) {
-        ALOGE("Invalid file descriptor: %d", fd);
+    int flags = fcntl(fd, F_GETFL);
+    if (flags == -1) {
+        ALOGE("Fail to get File Status Flags err: %s", strerror(errno));
+    }
+    // fd must be in read-write mode or write-only mode.
+    if ((flags & (O_RDWR | O_WRONLY)) == 0) {
+        ALOGE("File descriptor is not in read-write mode or write-only mode");
         return BAD_VALUE;
     }
 
-    status_t ret = mMediaRecorder->setOutputFile(fd, offset, length);
+    status_t ret = mMediaRecorder->setOutputFile(fd);
     if (OK != ret) {
-        ALOGV("setOutputFile failed: %d", ret);
+        ALOGE("setOutputFile failed: %d", ret);
         mCurrentState = MEDIA_RECORDER_ERROR;
         return ret;
     }
@@ -303,6 +309,37 @@
     return ret;
 }
 
+status_t MediaRecorder::setNextOutputFile(int fd)
+{
+    ALOGV("setNextOutputFile(%d)", fd);
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+
+    // It appears that if an invalid file descriptor is passed through
+    // binder calls, the server-side of the inter-process function call
+    // is skipped. As a result, the server-side check that catches the
+    // invalid file descriptor never gets invoked. Work around this by
+    // checking the file descriptor here before passing it through the
+    // binder call.
+    int flags = fcntl(fd, F_GETFL);
+    if (flags == -1) {
+        ALOGE("Fail to get File Status Flags err: %s", strerror(errno));
+    }
+    // fd must be in read-write mode or write-only mode.
+    if ((flags & (O_RDWR | O_WRONLY)) == 0) {
+        ALOGE("File descriptor is not in read-write mode or write-only mode");
+        return BAD_VALUE;
+    }
+
+    status_t ret = mMediaRecorder->setNextOutputFile(fd);
+    if (OK != ret) {
+        ALOGE("setNextOutputFile failed: %d", ret);
+    }
+    return ret;
+}
+
 status_t MediaRecorder::setVideoSize(int width, int height)
 {
     ALOGV("setVideoSize(%d, %d)", width, height);
diff --git a/media/libmediaanalyticsservice/Android.mk b/media/libmediaanalyticsservice/Android.mk
deleted file mode 100644
index dd59651..0000000
--- a/media/libmediaanalyticsservice/Android.mk
+++ /dev/null
@@ -1,44 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libmediaanalyticsservice
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:=               \
-    MediaAnalyticsService.cpp      \
-
-LOCAL_SHARED_LIBRARIES :=       \
-    libbinder                   \
-    libcutils                   \
-    liblog                      \
-    libdl                       \
-    libgui                      \
-    libmedia                    \
-    libmediautils               \
-    libstagefright_foundation   \
-    libutils
-
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
-
-LOCAL_C_INCLUDES :=                                                 \
-    $(TOP)/frameworks/av/media/libstagefright/include               \
-    $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
-    $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
-    $(TOP)/frameworks/av/media/libstagefright/webm                  \
-    $(TOP)/frameworks/av/include/media                              \
-    $(TOP)/frameworks/av/include/camera                             \
-    $(TOP)/frameworks/native/include/media/openmax                  \
-    $(TOP)/frameworks/native/include/media/hardware                 \
-    $(TOP)/external/tremolo/Tremolo                                 \
-    libcore/include                                                 \
-
-LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-LOCAL_CLANG := true
-
-LOCAL_MODULE:= libmediaanalyticsservice
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 065738e..2d4c475 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -975,13 +975,8 @@
 status_t MediaPlayerService::Client::setBufferingSettings(
         const BufferingSettings& buffering)
 {
-    ALOGV("[%d] setBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
-            mConnId, buffering.mInitialBufferingMode, buffering.mRebufferingMode,
-            buffering.mInitialWatermarkMs, buffering.mInitialWatermarkKB,
-            buffering.mRebufferingWatermarkLowMs,
-            buffering.mRebufferingWatermarkHighMs,
-            buffering.mRebufferingWatermarkLowKB,
-            buffering.mRebufferingWatermarkHighKB);
+    ALOGV("[%d] setBufferingSettings{%s}",
+            mConnId, buffering.toString().string());
     sp<MediaPlayerBase> p = getPlayer();
     if (p == 0) return UNKNOWN_ERROR;
     return p->setBufferingSettings(buffering);
@@ -995,13 +990,8 @@
     if (p == 0) return UNKNOWN_ERROR;
     status_t ret = p->getDefaultBufferingSettings(buffering);
     if (ret == NO_ERROR) {
-        ALOGV("[%d] getDefaultBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
-                mConnId, buffering->mInitialBufferingMode, buffering->mRebufferingMode,
-                buffering->mInitialWatermarkMs, buffering->mInitialWatermarkKB,
-                buffering->mRebufferingWatermarkLowMs,
-                buffering->mRebufferingWatermarkHighMs,
-                buffering->mRebufferingWatermarkLowKB,
-                buffering->mRebufferingWatermarkHighKB);
+        ALOGV("[%d] getDefaultBufferingSettings{%s}",
+                mConnId, buffering->toString().string());
     } else {
         ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
     }
@@ -1328,16 +1318,33 @@
     }
 
     sp<IMediaPlayerClient> c;
+    sp<Client> nextClient;
+    status_t errStartNext = NO_ERROR;
     {
         Mutex::Autolock l(client->mLock);
         c = client->mClient;
         if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
+            nextClient = client->mNextClient;
+
             if (client->mAudioOutput != NULL)
                 client->mAudioOutput->switchToNextOutput();
-            client->mNextClient->start();
-            if (client->mNextClient->mClient != NULL) {
-                client->mNextClient->mClient->notify(
-                        MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+
+            errStartNext = nextClient->start();
+        }
+    }
+
+    if (nextClient != NULL) {
+        sp<IMediaPlayerClient> nc;
+        {
+            Mutex::Autolock l(nextClient->mLock);
+            nc = nextClient->mClient;
+        }
+        if (nc != NULL) {
+            if (errStartNext == NO_ERROR) {
+                nc->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+            } else {
+                nc->notify(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, 0, obj);
+                ALOGE("gapless: start playback for next track failed, err(%d)", errStartNext);
             }
         }
     }
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index db182b2..819973e 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -347,6 +347,32 @@
         virtual status_t        dump(int fd, const Vector<String16>& args);
 
                 audio_session_t getAudioSessionId() { return mAudioSessionId; }
+        // ModDrm
+        virtual status_t prepareDrm(const uint8_t /*uuid*/[16], const int /*mode*/)
+                            { return INVALID_OPERATION; }
+        virtual status_t releaseDrm()
+                            { return INVALID_OPERATION; }
+        virtual status_t getKeyRequest(Vector<uint8_t> const& /*scope*/,
+                                 String8 const& /*mimeType*/,
+                                 DrmPlugin::KeyType /*keyType*/,
+                                 KeyedVector<String8, String8>& /*optionalParameters*/,
+                                 Vector<uint8_t>& /*request*/,
+                                 String8& /*defaultUrl*/,
+                                 DrmPlugin::KeyRequestType& /*keyRequestType*/)
+                            { return INVALID_OPERATION; }
+        virtual status_t provideKeyResponse(Vector<uint8_t>& /*releaseKeySetId*/,
+                                 Vector<uint8_t>& /*response*/,
+                                 Vector<uint8_t>& /*keySetId*/)
+                            { return INVALID_OPERATION; }
+        virtual status_t restoreKeys(Vector<uint8_t> const& /*keySetId*/)
+                            { return INVALID_OPERATION; }
+        virtual status_t getDrmPropertyString(String8 const& /*name*/,
+                                              String8& /*value*/)
+                            { return INVALID_OPERATION; }
+        virtual status_t setDrmPropertyString(String8 const& /*name*/,
+                                              String8 const& /*value*/)
+                            { return INVALID_OPERATION; }
+
 
     private:
         class ServiceDeathNotifier: public IBinder::DeathRecipient
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 2a07e5b..bb2d28b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -161,15 +161,26 @@
     return mRecorder->setAudioEncoder((audio_encoder)ae);
 }
 
-status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
+status_t MediaRecorderClient::setOutputFile(int fd)
 {
-    ALOGV("setOutputFile(%d, %lld, %lld)", fd, (long long)offset, (long long)length);
+    ALOGV("setOutputFile(%d)", fd);
     Mutex::Autolock lock(mLock);
     if (mRecorder == NULL) {
         ALOGE("recorder is not initialized");
         return NO_INIT;
     }
-    return mRecorder->setOutputFile(fd, offset, length);
+    return mRecorder->setOutputFile(fd);
+}
+
+status_t MediaRecorderClient::setNextOutputFile(int fd)
+{
+    ALOGV("setNextOutputFile(%d)", fd);
+    Mutex::Autolock lock(mLock);
+    if (mRecorder == NULL) {
+        ALOGE("recorder is not initialized");
+        return NO_INIT;
+    }
+    return mRecorder->setNextOutputFile(fd);
 }
 
 status_t MediaRecorderClient::setVideoSize(int width, int height)
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 83ef80a..8ddd680 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -53,8 +53,8 @@
     virtual     status_t   setOutputFormat(int of);
     virtual     status_t   setVideoEncoder(int ve);
     virtual     status_t   setAudioEncoder(int ae);
-    virtual     status_t   setOutputFile(int fd, int64_t offset,
-                                                  int64_t length);
+    virtual     status_t   setOutputFile(int fd);
+    virtual     status_t   setNextOutputFile(int fd);
     virtual     status_t   setVideoSize(int width, int height);
     virtual     status_t   setVideoFrameRate(int frames_per_second);
     virtual     status_t   setParameters(const String8& params);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 279bc86..9ce65c4 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -255,11 +255,8 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
-    ALOGV("setOutputFile: %d, %lld, %lld", fd, (long long)offset, (long long)length);
-    // These don't make any sense, do they?
-    CHECK_EQ(offset, 0ll);
-    CHECK_EQ(length, 0ll);
+status_t StagefrightRecorder::setOutputFile(int fd) {
+    ALOGV("setOutputFile: %d", fd);
 
     if (fd < 0) {
         ALOGE("Invalid file descriptor: %d", fd);
@@ -277,6 +274,25 @@
     return OK;
 }
 
+status_t StagefrightRecorder::setNextOutputFile(int fd) {
+    // Only support MPEG4
+    if (mOutputFormat != OUTPUT_FORMAT_MPEG_4) {
+        ALOGE("Only MP4 file format supports setting next output file");
+        return INVALID_OPERATION;
+    }
+    ALOGV("setNextOutputFile: %d", fd);
+
+    if (fd < 0) {
+        ALOGE("Invalid file descriptor: %d", fd);
+        return -EBADF;
+    }
+
+    // start with a clean, empty file
+    ftruncate(fd, 0);
+    int nextFd = dup(fd);
+    return mWriter->setNextFd(nextFd);
+}
+
 // Attempt to parse an float literal optionally surrounded by whitespace,
 // returns true on success, false otherwise.
 static bool safe_strtof(const char *s, float *val) {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 4c2e65c..870c5d0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -55,7 +55,8 @@
     virtual status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
     virtual status_t setInputSurface(const sp<PersistentSurface>& surface);
-    virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
+    virtual status_t setOutputFile(int fd);
+    virtual status_t setNextOutputFile(int fd);
     virtual status_t setParameters(const String8 &params);
     virtual status_t setListener(const sp<IMediaRecorderClient> &listener);
     virtual status_t setClientName(const String16 &clientName);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index d1d1077..91a2b7b 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -38,11 +38,12 @@
 
 namespace android {
 
-static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
-static int64_t kHighWaterMarkUs = 5000000ll;  // 5secs
-static int64_t kHighWaterMarkRebufferUs = 15000000ll;  // 15secs
-static const ssize_t kLowWaterMarkBytes = 40000;
-static const ssize_t kHighWaterMarkBytes = 200000;
+static const int kLowWaterMarkMs          = 2000;  // 2secs
+static const int kHighWaterMarkMs         = 5000;  // 5secs
+static const int kHighWaterMarkRebufferMs = 15000;  // 15secs
+
+static const int kLowWaterMarkKB  = 40;
+static const int kHighWaterMarkKB = 200;
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
@@ -237,6 +238,16 @@
     return OK;
 }
 
+status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
+        BufferingSettings* buffering /* nonnull */) {
+    mBufferingMonitor->getDefaultBufferingSettings(buffering);
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
+    return mBufferingMonitor->setBufferingSettings(buffering);
+}
+
 status_t NuPlayer::GenericSource::startSources() {
     // Start the selected A/V tracks now before we start buffering.
     // Widevine sources might re-initialize crypto when starting, if we delay
@@ -618,6 +629,12 @@
           break;
       }
 
+      case kWhatGetTrackInfo:
+      {
+          onGetTrackInfo(msg);
+          break;
+      }
+
       case kWhatSelectTrack:
       {
           onSelectTrack(msg);
@@ -868,6 +885,34 @@
 }
 
 sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
+    sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
+    msg->setSize("trackIndex", trackIndex);
+
+    sp<AMessage> response;
+    sp<RefBase> format;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findObject("format", &format));
+        return static_cast<AMessage*>(format.get());
+    } else {
+        return NULL;
+    }
+}
+
+void NuPlayer::GenericSource::onGetTrackInfo(const sp<AMessage>& msg) const {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<AMessage> response = new AMessage;
+    sp<AMessage> format = doGetTrackInfo(trackIndex);
+    response->setObject("format", format);
+
+    sp<AReplyToken> replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+sp<AMessage> NuPlayer::GenericSource::doGetTrackInfo(size_t trackIndex) const {
     size_t trackCount = mSources.size();
     if (trackIndex >= trackCount) {
         return NULL;
@@ -1435,11 +1480,54 @@
       mFirstDequeuedBufferRealUs(-1ll),
       mFirstDequeuedBufferMediaUs(-1ll),
       mlastDequeuedBufferMediaUs(-1ll) {
+      getDefaultBufferingSettings(&mSettings);
 }
 
 NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
 }
 
+void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
+    buffering->mInitialWatermarkMs = kHighWaterMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
+    buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
+    buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
+
+    ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
+            buffering->toString().string());
+}
+
+status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    ALOGV("BufferingMonitor::setBufferingSettings{%s}",
+            buffering.toString().string());
+
+    Mutex::Autolock _l(mLock);
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
+            || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
+        return BAD_VALUE;
+    }
+    mSettings = buffering;
+    if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+    if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
+    }
+    return OK;
+}
+
 void NuPlayer::GenericSource::BufferingMonitor::prepare(
         const sp<NuCachedSource2> &cachedSource,
         int64_t durationUs,
@@ -1668,7 +1756,9 @@
 
         stopBufferingIfNecessary_l();
         return;
-    } else if (cachedDurationUs >= 0ll) {
+    }
+
+    if (cachedDurationUs >= 0ll) {
         if (mDurationUs > 0ll) {
             int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
             int percentage = 100.0 * cachedPosUs / mDurationUs;
@@ -1679,36 +1769,40 @@
             notifyBufferingUpdate_l(percentage);
         }
 
-        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec",
-                cachedDurationUs / 1000000.0f);
+        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
 
-        if (cachedDurationUs < kLowWaterMarkUs) {
-            // Take into account the data cached in downstream components to try to avoid
-            // unnecessary pause.
-            if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
-                int64_t downStreamCacheUs = mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
-                        - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
-                if (downStreamCacheUs > 0) {
-                    cachedDurationUs += downStreamCacheUs;
+        if (mPrepareBuffering) {
+            if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
+                stopBufferingIfNecessary_l();
+            }
+        } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+            if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                // Take into account the data cached in downstream components to try to avoid
+                // unnecessary pause.
+                if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
+                    int64_t downStreamCacheUs =
+                        mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
+                            - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
+                    if (downStreamCacheUs > 0) {
+                        cachedDurationUs += downStreamCacheUs;
+                    }
                 }
-            }
 
-            if (cachedDurationUs < kLowWaterMarkUs) {
-                startBufferingIfNecessary_l();
-            }
-        } else {
-            int64_t highWaterMark = mPrepareBuffering ? kHighWaterMarkUs : kHighWaterMarkRebufferUs;
-            if (cachedDurationUs > highWaterMark) {
+                if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                    startBufferingIfNecessary_l();
+                }
+            } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
                 stopBufferingIfNecessary_l();
             }
         }
-    } else if (cachedDataRemaining >= 0) {
+    } else if (cachedDataRemaining >= 0
+            && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
         ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
                 cachedDataRemaining);
 
-        if (cachedDataRemaining < kLowWaterMarkBytes) {
+        if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
             startBufferingIfNecessary_l();
-        } else if (cachedDataRemaining > kHighWaterMarkBytes) {
+        } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
             stopBufferingIfNecessary_l();
         }
     }
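
Editor's note: BufferingMonitor::setBufferingSettings() above rejects size-based initial buffering and any rebuffering mode whose low watermark exceeds its high watermark, then blanks out the watermarks of unused modes. A sketch of a settings object that passes those checks; the helper is illustrative and the header path is assumed from the BufferingSettings usage elsewhere in this patch:

    #include <media/BufferingSettings.h>

    // Sketch: time-based startup, time-then-size rebuffering; all low watermarks
    // stay below the corresponding high watermarks, so setBufferingSettings()
    // above would accept it.
    static android::BufferingSettings exampleSettings() {
        android::BufferingSettings s;
        s.mInitialBufferingMode       = android::BUFFERING_MODE_TIME_ONLY;
        s.mInitialWatermarkMs         = 4000;    // start once ~4 s is buffered
        s.mRebufferingMode            = android::BUFFERING_MODE_TIME_THEN_SIZE;
        s.mRebufferingWatermarkLowMs  = 2000;
        s.mRebufferingWatermarkHighMs = 10000;
        s.mRebufferingWatermarkLowKB  = 40;
        s.mRebufferingWatermarkHighKB = 200;
        return s;
    }
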
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index a14056f..e1949f3 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -50,6 +50,10 @@
 
     status_t setDataSource(const sp<DataSource>& dataSource);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
 
     virtual void start();
@@ -119,6 +123,9 @@
     public:
         explicit BufferingMonitor(const sp<AMessage> &notify);
 
+        void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
+        status_t setBufferingSettings(const BufferingSettings &buffering);
+
         // Set up state.
         void prepare(const sp<NuCachedSource2> &cachedSource,
                 int64_t durationUs,
@@ -167,6 +174,7 @@
 
         mutable Mutex mLock;
 
+        BufferingSettings mSettings;
         bool mOffloadAudio;
         int64_t mFirstDequeuedBufferRealUs;
         int64_t mFirstDequeuedBufferMediaUs;
@@ -245,6 +253,9 @@
     void onGetFormatMeta(const sp<AMessage>& msg) const;
     sp<MetaData> doGetFormatMeta(bool audio) const;
 
+    void onGetTrackInfo(const sp<AMessage>& msg) const;
+    sp<AMessage> doGetTrackInfo(size_t trackIndex) const;
+
     void onGetSelectedTrack(const sp<AMessage>& msg) const;
     ssize_t doGetSelectedTrack(media_track_type type) const;
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 51bfad4..05e6201 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -32,6 +32,11 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/Utils.h>
 
+// default buffer prepare/ready/underflow marks
+static const int kReadyMarkMs     = 5000;  // 5 seconds
+static const int kPrepareMarkMs   = 1500;  // 1.5 seconds
+static const int kUnderflowMarkMs = 1000;  // 1 second
+
 namespace android {
 
 NuPlayer::HTTPLiveSource::HTTPLiveSource(
@@ -49,6 +54,7 @@
       mFetchMetaDataGeneration(0),
       mHasMetadata(false),
       mMetadataSelected(false) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -76,6 +82,42 @@
     }
 }
 
+status_t NuPlayer::HTTPLiveSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kReadyMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::HTTPLiveSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
+        return BAD_VALUE;
+    }
+
+    mBufferingSettings = buffering;
+
+    if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+
+    if (mLiveSession != NULL) {
+        mLiveSession->setBufferingSettings(mBufferingSettings);
+    }
+
+    return OK;
+}
+
 void NuPlayer::HTTPLiveSource::prepareAsync() {
     if (mLiveLooper == NULL) {
         mLiveLooper = new ALooper;
@@ -94,6 +136,7 @@
 
     mLiveLooper->registerHandler(mLiveSession);
 
+    mLiveSession->setBufferingSettings(mBufferingSettings);
     mLiveSession->connectAsync(
             mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
 }
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 45fc8c1..2866a6a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -34,6 +34,10 @@
             const char *url,
             const KeyedVector<String8, String8> *headers);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
@@ -80,6 +84,7 @@
     int32_t mFetchMetaDataGeneration;
     bool mHasMetadata;
     bool mMetadataSelected;
+    BufferingSettings mBufferingSettings;
 
     void onSessionNotify(const sp<AMessage> &msg);
     void pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index d1ade1d..6593fcd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -314,6 +314,31 @@
     msg->post();
 }
 
+status_t NuPlayer::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetDefaultBufferingSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, buffering);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::setBufferingSettings(const BufferingSettings& buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
 void NuPlayer::prepareAsync() {
     (new AMessage(kWhatPrepare, this))->post();
 }
@@ -508,6 +533,48 @@
             break;
         }
 
+        case kWhatGetDefaultBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatGetDefaultBufferingSettings");
+            BufferingSettings buffering;
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->getDefaultBufferingSettings(&buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, buffering);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatSetBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatSetBufferingSettings");
+            BufferingSettings buffering;
+            readFromAMessage(msg, &buffering);
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->setBufferingSettings(buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case kWhatPrepare:
         {
             mSource->prepareAsync();
@@ -1755,7 +1822,8 @@
     // Take into account sample aspect ratio if necessary:
     int32_t sarWidth, sarHeight;
     if (inputFormat->findInt32("sar-width", &sarWidth)
-            && inputFormat->findInt32("sar-height", &sarHeight)) {
+            && inputFormat->findInt32("sar-height", &sarHeight)
+            && sarWidth > 0 && sarHeight > 0) {
         ALOGV("Sample aspect ratio %d : %d", sarWidth, sarHeight);
 
         displayWidth = (displayWidth * sarWidth) / sarHeight;
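
Editor's note: the kWhatGetDefaultBufferingSettings / kWhatSetBufferingSettings cases above follow NuPlayer's usual looper handshake -- the public method posts an AMessage and blocks in postAndAwaitResponse(), and the handler answers with a reply whose "err" field the caller reads. A stripped-down sketch of the replying side; replyWithStatus is an illustrative helper, not NuPlayer code:

    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    // Sketch: answer a request message so the caller's postAndAwaitResponse()
    // unblocks and can read the "err" field, as kWhatSetBufferingSettings does.
    static void replyWithStatus(const android::sp<android::AMessage>& msg,
                                android::status_t err) {
        using namespace android;
        sp<AReplyToken> replyID;
        CHECK(msg->senderAwaitsResponse(&replyID));

        sp<AMessage> response = new AMessage;
        response->setInt32("err", err);
        response->postReply(replyID);
    }
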
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 6f737bb..cc8c97a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -50,6 +50,9 @@
 
     void setDataSourceAsync(const sp<DataSource> &source);
 
+    status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+    status_t setBufferingSettings(const BufferingSettings& buffering);
+
     void prepareAsync();
 
     void setVideoSurfaceTextureAsync(
@@ -137,6 +140,8 @@
         kWhatGetTrackInfo               = 'gTrI',
         kWhatGetSelectedTrack           = 'gSel',
         kWhatSelectTrack                = 'selT',
+        kWhatGetDefaultBufferingSettings = 'gDBS',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     wp<NuPlayerDriver> mDriver;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index f7e56e4..0ddbd63 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -204,6 +204,26 @@
     return OK;
 }
 
+status_t NuPlayerDriver::getDefaultBufferingSettings(BufferingSettings* buffering) {
+    ALOGV("getDefaultBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t NuPlayerDriver::setBufferingSettings(const BufferingSettings& buffering) {
+    ALOGV("setBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->setBufferingSettings(buffering);
+}
+
 status_t NuPlayerDriver::prepare() {
     ALOGV("prepare(%p)", this);
     Mutex::Autolock autoLock(mLock);
@@ -686,7 +706,16 @@
     return INVALID_OPERATION;
 }
 
-status_t NuPlayerDriver::getParameter(int /* key */, Parcel * /* reply */) {
+status_t NuPlayerDriver::getParameter(int key, Parcel *reply) {
+
+    if (key == FOURCC('m','t','r','X')) {
+        // mtrX -- a play on 'metrics' (not matrix)
+        // gather current info all together, parcel it, and send it back
+        finalizeMetrics("api");
+        mAnalyticsItem->writeToParcel(reply);
+        return OK;
+    }
+
     return INVALID_OPERATION;
 }
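
Editor's note: NuPlayerDriver::getParameter() now answers the 'mtrX' key by finalizing and parceling the current MediaAnalyticsItem. A hedged caller-side sketch; dumpPlayerMetrics and kMetricsKey are illustrative names, with the key value packed the same way as FOURCC('m','t','r','X') above:

    #include <binder/Parcel.h>
    #include <media/MediaPlayerInterface.h>

    // Sketch: request the player's metrics parcel through the generic
    // getParameter() path using the new 'mtrX' key.
    static const int kMetricsKey = ('m' << 24) | ('t' << 16) | ('r' << 8) | 'X';

    static android::status_t dumpPlayerMetrics(
            const android::sp<android::MediaPlayerBase>& player) {
        android::Parcel reply;
        android::status_t err = player->getParameter(kMetricsKey, &reply);
        if (err == android::OK) {
            reply.setDataPosition(0);
            // The reply carries a serialized MediaAnalyticsItem; unpack or log it here.
        }
        return err;
    }
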
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 9b784ae..5bfc539 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -44,6 +44,11 @@
 
     virtual status_t setVideoSurfaceTexture(
             const sp<IGraphicBufferProducer> &bufferProducer);
+
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual status_t prepare();
     virtual status_t prepareAsync();
     virtual status_t start();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 6006730..0429ef1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -63,6 +63,10 @@
         : mNotify(notify) {
     }
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) = 0;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+
     virtual void prepareAsync() = 0;
 
     virtual void start() = 0;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index fb1f31a..9264e49 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,11 +32,11 @@
 
 const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
 
-// Buffer Underflow/Prepare/StartServer/Overflow Marks
-const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs   =  1000000ll;
-const int64_t NuPlayer::RTSPSource::kPrepareMarkUs     =  3000000ll;
-const int64_t NuPlayer::RTSPSource::kStartServerMarkUs =  5000000ll;
-const int64_t NuPlayer::RTSPSource::kOverflowMarkUs    = 10000000ll;
+// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
+static const int kUnderflowMarkMs   =  1000;  // 1 second
+static const int kPrepareMarkMs     =  3000;  // 3 seconds
+//static const int kStartServerMarkMs =  5000;
+static const int kOverflowMarkMs    = 10000;  // 10 seconds
 
 NuPlayer::RTSPSource::RTSPSource(
         const sp<AMessage> &notify,
@@ -62,6 +62,7 @@
       mSeekGeneration(0),
       mEOSTimeoutAudio(0),
       mEOSTimeoutVideo(0) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -83,6 +84,34 @@
     }
 }
 
+status_t NuPlayer::RTSPSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kOverflowMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::RTSPSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (mLooper == NULL) {
+        mBufferingSettings = buffering;
+        return OK;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
 void NuPlayer::RTSPSource::prepareAsync() {
     if (mIsSDP && mHTTPService == NULL) {
         notifyPrepared(BAD_VALUE);
@@ -328,7 +357,8 @@
         int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
 
         // isFinished when duration is 0 checks for EOS result only
-        if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+        if (bufferedDurationUs > mBufferingSettings.mInitialWatermarkMs * 1000
+                || src->isFinished(/* duration */ 0)) {
             ++preparedCount;
         }
 
@@ -336,13 +366,16 @@
             ++overflowCount;
             ++finishedCount;
         } else {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000) {
                 ++underflowCount;
             }
-            if (bufferedDurationUs > kOverflowMarkUs) {
+            if (bufferedDurationUs > mBufferingSettings.mRebufferingWatermarkHighMs * 1000) {
                 ++overflowCount;
             }
-            if (bufferedDurationUs < kStartServerMarkUs) {
+            int64_t startServerMarkUs =
+                    (mBufferingSettings.mRebufferingWatermarkLowMs
+                        + mBufferingSettings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
+            if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
         }
@@ -479,6 +512,36 @@
     } else if (msg->what() == kWhatSignalEOS) {
         onSignalEOS(msg);
         return;
+    } else if (msg->what() == kWhatSetBufferingSettings) {
+        sp<AReplyToken> replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        BufferingSettings buffering;
+        readFromAMessage(msg, &buffering);
+
+        status_t err = OK;
+        if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+                || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                || (buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs
+                    && buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode))) {
+            err = BAD_VALUE;
+        } else {
+            if (buffering.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+            }
+            if (buffering.mRebufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+                buffering.mRebufferingWatermarkHighMs = INT32_MAX;
+            }
+
+            mBufferingSettings = buffering;
+        }
+
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", err);
+        response->postReply(replyID);
+
+        return;
     }
 
     CHECK_EQ(msg->what(), (int)kWhatNotify);
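
With the defaults defined above (low watermark 1000 ms, high watermark 10000 ms), the derived start-server mark works out to (1000 + 10000) / 2 * 1000 = 5,500,000 us, close to the fixed 5-second kStartServerMarkUs it replaces. A minimal sketch of the millisecond-to-microsecond conversions used by the polling code, assuming only the BufferingSettings fields shown in this patch:

// Sketch only: convert stored millisecond watermarks to microseconds for comparison
// against buffered-duration values reported in microseconds.
int64_t initialMarkUs = (int64_t)settings.mInitialWatermarkMs * 1000;
int64_t lowMarkUs     = (int64_t)settings.mRebufferingWatermarkLowMs * 1000;
int64_t highMarkUs    = (int64_t)settings.mRebufferingWatermarkHighMs * 1000;
int64_t startServerMarkUs =
        (settings.mRebufferingWatermarkLowMs + settings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
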
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 363f8bb..0812991 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -40,6 +40,10 @@
             uid_t uid = 0,
             bool isSDP = false);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
     virtual void stop();
@@ -67,6 +71,7 @@
         kWhatPerformSeek     = 'seek',
         kWhatPollBuffering   = 'poll',
         kWhatSignalEOS       = 'eos ',
+        kWhatSetBufferingSettings = 'sBuS',
     };
 
     enum State {
@@ -81,12 +86,6 @@
         kFlagIncognito = 1,
     };
 
-    // Buffer Prepare/Underflow/Overflow/Resume Marks
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
-    static const int64_t kOverflowMarkUs;
-    static const int64_t kStartServerMarkUs;
-
     struct TrackInfo {
         sp<AnotherPacketSource> mSource;
 
@@ -110,6 +109,7 @@
     bool mBuffering;
     bool mInPreparationPhase;
     bool mEOSPending;
+    BufferingSettings mBufferingSettings;
 
     sp<ALooper> mLooper;
     sp<MyHandler> mHandler;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index d6b1e8c..fc0803b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -51,6 +51,22 @@
     }
 }
 
+status_t NuPlayer::StreamingSource::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    *buffering = BufferingSettings();
+    return OK;
+}
+
+status_t NuPlayer::StreamingSource::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    if (buffering.mInitialBufferingMode != BUFFERING_MODE_NONE
+            || buffering.mRebufferingMode != BUFFERING_MODE_NONE) {
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
 void NuPlayer::StreamingSource::prepareAsync() {
     if (mLooper == NULL) {
         mLooper = new ALooper;
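
StreamingSource, in contrast, accepts only the disabled (BUFFERING_MODE_NONE) configuration, so any request for time- or size-based buffering is rejected. A hypothetical caller-side probe (sketch only; the source pointer and the requested mode are assumptions, not from the patch):

// Illustrative only.
BufferingSettings request;
request.mRebufferingMode = BUFFERING_MODE_TIME_ONLY;        // anything other than NONE
if (source->setBufferingSettings(request) == BAD_VALUE) {
    source->getDefaultBufferingSettings(&request);          // fall back to the defaults
}
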
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index db88c7f..2e1d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -32,6 +32,10 @@
             const sp<AMessage> &notify,
             const sp<IStreamSource> &source);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
index c0b3265..dc761ec 100644
--- a/media/libmediaplayerservice/tests/Android.mk
+++ b/media/libmediaplayerservice/tests/Android.mk
@@ -14,6 +14,8 @@
 	libmediaplayerservice \
 	libmediadrm \
 	libutils \
+	android.hidl.base@1.0 \
+	android.hardware.drm@1.0 \
 
 LOCAL_C_INCLUDES := \
 	frameworks/av/include \
diff --git a/media/liboboe/Android.bp b/media/liboboe/Android.bp
index bfcc049..fd6591d 100644
--- a/media/liboboe/Android.bp
+++ b/media/liboboe/Android.bp
@@ -13,11 +13,11 @@
 // limitations under the License.
 
 ndk_headers {
-    name: "libOboe_headers",
+    name: "libAAudio_headers",
     from: "include",
     to: "",
-    srcs: ["include/oboe/*.h"],
-    license: "include/oboe/NOTICE",
+    srcs: ["include/aaudio/*.h"],
+    license: "include/aaudio/NOTICE",
 }
 
 ndk_library {
diff --git a/media/liboboe/README.md b/media/liboboe/README.md
index 80894c6..9f2c249 100644
--- a/media/liboboe/README.md
+++ b/media/liboboe/README.md
@@ -1 +1 @@
-Oboe Audio input/output API
+AAudio Audio input/output API
diff --git a/media/liboboe/examples/Android.mk b/media/liboboe/examples/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/liboboe/examples/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/liboboe/examples/write_sine/Android.mk b/media/liboboe/examples/write_sine/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/liboboe/examples/write_sine/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/liboboe/examples/write_sine/README.md b/media/liboboe/examples/write_sine/README.md
new file mode 100644
index 0000000..b150471
--- /dev/null
+++ b/media/liboboe/examples/write_sine/README.md
@@ -0,0 +1,7 @@
+# cd to this directory
+mkdir -p jni/include/aaudio
+ln -s $PLATFORM/frameworks/av/media/liboboe/include/aaudio/*.h jni/include/aaudio
+ln -s $PLATFORM/out/target/product/$TARGET_PRODUCT/symbols/out/soong/ndk/platforms/android-current/arch-arm64/usr/lib/liboboe.so jni
+$NDK/ndk-build
+adb push libs/arm64-v8a/write_sine_threaded /data
+adb shell /data/write_sine_threaded
diff --git a/media/liboboe/examples/write_sine/jni/Android.mk b/media/liboboe/examples/write_sine/jni/Android.mk
new file mode 100644
index 0000000..51a5a85
--- /dev/null
+++ b/media/liboboe/examples/write_sine/jni/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+        libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboboe_prebuilt
+LOCAL_SRC_FILES := liboboe.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/liboboe/examples/write_sine/jni/Application.mk b/media/liboboe/examples/write_sine/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/liboboe/examples/write_sine/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures

+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/liboboe/examples/write_sine/src/SineGenerator.h b/media/liboboe/examples/write_sine/src/SineGenerator.h
new file mode 100644
index 0000000..ade7527
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/SineGenerator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SINE_GENERATOR_H
+#define SINE_GENERATOR_H
+
+#include <math.h>
+
+class SineGenerator
+{
+public:
+    SineGenerator() {}
+    virtual ~SineGenerator() = default;
+
+    void setup(double frequency, double frameRate) {
+        mFrameRate = frameRate;
+        mPhaseIncrement = frequency * M_PI * 2 / frameRate;
+    }
+
+    void setSweep(double frequencyLow, double frequencyHigh, double seconds) {
+        mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
+        mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
+
+        double numFrames = seconds * mFrameRate;
+        mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
+        mDownScaler = 1.0 / mUpScaler;
+        mGoingUp = true;
+        mSweeping = true;
+    }
+
+    void render(int16_t *buffer, int32_t channelStride, int32_t numFrames) {
+        int sampleIndex = 0;
+        for (int i = 0; i < numFrames; i++) {
+            buffer[sampleIndex] = (int16_t) (32767 * sin(mPhase) * mAmplitude);
+            sampleIndex += channelStride;
+            advancePhase();
+        }
+    }
+    void render(float *buffer, int32_t channelStride, int32_t numFrames) {
+        int sampleIndex = 0;
+        for (int i = 0; i < numFrames; i++) {
+            buffer[sampleIndex] = sin(mPhase) * mAmplitude;
+            sampleIndex += channelStride;
+            advancePhase();
+        }
+    }
+
+private:
+    void advancePhase() {
+        mPhase += mPhaseIncrement;
+        if (mPhase > M_PI * 2) {
+            mPhase -= M_PI * 2;
+        }
+        if (mSweeping) {
+            if (mGoingUp) {
+                mPhaseIncrement *= mUpScaler;
+                if (mPhaseIncrement > mPhaseIncrementHigh) {
+                    mGoingUp = false;
+                }
+            } else {
+                mPhaseIncrement *= mDownScaler;
+                if (mPhaseIncrement < mPhaseIncrementLow) {
+                    mGoingUp = true;
+                }
+            }
+        }
+    }
+
+    double mAmplitude = 0.01;
+    double mPhase = 0.0;
+    double mPhaseIncrement = 440 * M_PI * 2 / 48000;
+    double mFrameRate = 48000;
+    double mPhaseIncrementLow;
+    double mPhaseIncrementHigh;
+    double mUpScaler = 1.0;
+    double mDownScaler = 1.0;
+    bool   mGoingUp = false;
+    bool   mSweeping = false;
+};
+
+#endif /* SINE_GENERATOR_H */
+
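
SineGenerator derives each sample from a running phase accumulator, and setSweep() scales the phase increment exponentially between the two frequencies on every frame. A small usage sketch, mirroring how the examples below drive one oscillator per channel (buffer size and rates are illustrative):

// Sketch: fill 64 interleaved stereo float frames with two tones.
SineGenerator left, right;
left.setup(440.0, 48000.0);                       // 440 Hz at a 48 kHz frame rate
right.setup(660.0, 48000.0);
float buffer[2 * 64];                             // interleaved L/R
left.render(&buffer[0], 2 /*channelStride*/, 64);
right.render(&buffer[1], 2 /*channelStride*/, 64);
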
diff --git a/media/liboboe/examples/write_sine/src/write_sine.cpp b/media/liboboe/examples/write_sine/src/write_sine.cpp
new file mode 100644
index 0000000..090f371
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/write_sine.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using AAudio.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define SAMPLE_RATE   48000
+#define NUM_SECONDS   10
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+    const char *modeText = "unknown";
+    switch (mode) {
+    case AAUDIO_SHARING_MODE_EXCLUSIVE:
+        modeText = "EXCLUSIVE";
+        break;
+    case AAUDIO_SHARING_MODE_LEGACY:
+        modeText = "LEGACY";
+        break;
+    case AAUDIO_SHARING_MODE_SHARED:
+        modeText = "SHARED";
+        break;
+    case AAUDIO_SHARING_MODE_PUBLIC_MIX:
+        modeText = "PUBLIC_MIX";
+        break;
+    default:
+        break;
+    }
+    return modeText;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+
+    aaudio_result_t result = AAUDIO_OK;
+
+    const int requestedSamplesPerFrame = 2;
+    int actualSamplesPerFrame = 0;
+    const int requestedSampleRate = SAMPLE_RATE;
+    int actualSampleRate = 0;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM16;
+    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM16;
+
+    const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+    //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+    aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+
+    AAudioStreamBuilder aaudioBuilder = AAUDIO_STREAM_BUILDER_NONE;
+    AAudioStream aaudioStream = AAUDIO_STREAM_NONE;
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    aaudio_size_frames_t framesPerBurst = 0;
+    aaudio_size_frames_t framesToPlay = 0;
+    aaudio_size_frames_t framesLeft = 0;
+    int32_t xRunCount = 0;
+    int16_t *data = nullptr;
+
+    SineGenerator sineOsc1;
+    SineGenerator sineOsc2;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("%s - Play a sine wave using AAudio\n", argv[0]);
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    // Request stream properties.
+    result = AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+    result = AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+    result = AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+    result = AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    // Create an AAudioStream using the Builder.
+    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+    printf("aaudioStream 0x%08x\n", aaudioStream);
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    result = AAudioStream_getState(aaudioStream, &state);
+    printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
+
+    // Check to see what kind of stream we actually got.
+    result = AAudioStream_getSampleRate(aaudioStream, &actualSampleRate);
+    printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+    sineOsc1.setup(440.0, actualSampleRate);
+    sineOsc2.setup(660.0, actualSampleRate);
+
+    result = AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame);
+    printf("SamplesPerFrame: requested = %d, actual = %d\n",
+            requestedSamplesPerFrame, actualSamplesPerFrame);
+
+    result = AAudioStream_getSharingMode(aaudioStream, &actualSharingMode);
+    printf("SharingMode: requested = %s, actual = %s\n",
+            getSharingModeText(requestedSharingMode),
+            getSharingModeText(actualSharingMode));
+
+    // This is the number of frames that are read in one chunk by a DMA controller
+    // or a DSP or a mixer.
+    result = AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst);
+    printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_getFramesPerBurst() returned %d\n", result);
+        goto finish;
+    }
+    // Some DMA might use very short bursts of 16 frames. We don't need to write such small
+    // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+    while (framesPerBurst < 48) {
+        framesPerBurst *= 2;
+    }
+    printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+
+    AAudioStream_getFormat(aaudioStream, &actualDataFormat);
+    printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+    // TODO handle other data formats
+
+    // Allocate a buffer for the audio data.
+    data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+    if (data == nullptr) {
+        fprintf(stderr, "ERROR - could not allocate data buffer\n");
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto finish;
+    }
+
+    // Start the stream.
+    printf("call AAudioStream_requestStart()\n");
+    result = AAudioStream_requestStart(aaudioStream);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+        goto finish;
+    }
+
+    result = AAudioStream_getState(aaudioStream, &state);
+    printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+    // Play for a while.
+    framesToPlay = actualSampleRate * NUM_SECONDS;
+    framesLeft = framesToPlay;
+    while (framesLeft > 0) {
+        // Render sine waves to left and right channels.
+        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+        if (actualSamplesPerFrame > 1) {
+            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+        }
+
+        // Write audio data to the stream.
+        aaudio_nanoseconds_t timeoutNanos = 100 * AAUDIO_NANOS_PER_MILLISECOND;
+        int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
+        int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
+        if (actual < 0) {
+            fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
+            goto finish;
+        } else if (actual == 0) {
+            fprintf(stderr, "WARNING - AAudioStream_write() returned %zd\n", actual);
+            goto finish;
+        }
+        framesLeft -= actual;
+    }
+
+    result = AAudioStream_getXRunCount(aaudioStream, &xRunCount);
+    printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+finish:
+    delete[] data;
+    AAudioStream_close(aaudioStream);
+    AAudioStreamBuilder_delete(aaudioBuilder);
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp b/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
new file mode 100644
index 0000000..7cde67b
--- /dev/null
+++ b/media/liboboe/examples/write_sine/src/write_sine_threaded.cpp
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio background thread.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS   10
+
+#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
+//#define SHARING_MODE  AAUDIO_SHARING_MODE_LEGACY
+
+// Prototype for a callback.
+typedef int audio_callback_proc_t(float *outputBuffer,
+                                     aaudio_size_frames_t numFrames,
+                                     void *userContext);
+
+static void *SimpleAAudioPlayerThreadProc(void *arg);
+
+/**
+ * Simple wrapper for AAudio that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleAAudioPlayer {
+public:
+    SimpleAAudioPlayer() {}
+    virtual ~SimpleAAudioPlayer() {
+        close();
+    };
+
+    void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+        mRequestedSharingMode = requestedSharingMode;
+    }
+
+    /** Also known as "sample rate"
+     */
+    int32_t getFramesPerSecond() {
+        return mFramesPerSecond;
+    }
+
+    int32_t getSamplesPerFrame() {
+        return mSamplesPerFrame;
+    }
+
+    /**
+     * Open a stream
+     */
+    aaudio_result_t open(audio_callback_proc_t *proc, void *userContext) {
+        mCallbackProc = proc;
+        mUserContext = userContext;
+        aaudio_result_t result = AAUDIO_OK;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        result = AAudio_createStreamBuilder(&mBuilder);
+        if (result != AAUDIO_OK) return result;
+
+        result = AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+        if (result != AAUDIO_OK) goto finish1;
+
+        // Open an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+        if (result != AAUDIO_OK) goto finish1;
+
+        // Check to see what kind of stream we actually got.
+        result = AAudioStream_getSampleRate(mStream, &mFramesPerSecond);
+        printf("open() mFramesPerSecond = %d\n", mFramesPerSecond);
+        if (result != AAUDIO_OK) goto finish2;
+        result = AAudioStream_getSamplesPerFrame(mStream, &mSamplesPerFrame);
+        printf("open() mSamplesPerFrame = %d\n", mSamplesPerFrame);
+        if (result != AAUDIO_OK) goto finish2;
+
+        // This is the number of frames that are read in one chunk by a DMA controller
+        // or a DSP or a mixer.
+        result = AAudioStream_getFramesPerBurst(mStream, &mFramesPerBurst);
+        if (result != AAUDIO_OK) goto finish2;
+        // Some DMA might use very short bursts. We don't need to write such small
+        // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+        while (mFramesPerBurst < 48) {
+            mFramesPerBurst *= 2;
+        }
+        printf("DataFormat: final framesPerBurst = %d\n",mFramesPerBurst);
+
+        result = AAudioStream_getFormat(mStream, &mDataFormat);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_getFormat() returned %d\n", result);
+            goto finish2;
+        }
+
+        // Allocate a buffer for the audio data.
+        mOutputBuffer = new float[mFramesPerBurst * mSamplesPerFrame];
+        if (mOutputBuffer == nullptr) {
+            fprintf(stderr, "ERROR - could not allocate data buffer\n");
+            result = AAUDIO_ERROR_NO_MEMORY;
+        }
+
+        // If needed allocate a buffer for converting float to int16_t.
+        if (mDataFormat == AAUDIO_FORMAT_PCM16) {
+            mConversionBuffer = new int16_t[mFramesPerBurst * mSamplesPerFrame];
+            if (mConversionBuffer == nullptr) {
+                fprintf(stderr, "ERROR - could not allocate conversion buffer\n");
+                result = AAUDIO_ERROR_NO_MEMORY;
+            }
+        }
+        return result;
+
+     finish2:
+        AAudioStream_close(mStream);
+        mStream = AAUDIO_HANDLE_INVALID;
+     finish1:
+        AAudioStreamBuilder_delete(mBuilder);
+        mBuilder = AAUDIO_HANDLE_INVALID;
+        return result;
+    }
+
+    aaudio_result_t close() {
+        if (mStream != AAUDIO_HANDLE_INVALID) {
+            stop();
+            printf("call AAudioStream_close(0x%08x)\n", mStream);  fflush(stdout);
+            AAudioStream_close(mStream);
+            mStream = AAUDIO_HANDLE_INVALID;
+            AAudioStreamBuilder_delete(mBuilder);
+            mBuilder = AAUDIO_HANDLE_INVALID;
+            delete[] mOutputBuffer;
+            mOutputBuffer = nullptr;
+            delete[] mConversionBuffer;
+            mConversionBuffer = nullptr;
+        }
+        return AAUDIO_OK;
+    }
+
+    // Start a thread that will call the callback proc.
+    aaudio_result_t start() {
+        mEnabled = true;
+        aaudio_nanoseconds_t nanosPerBurst = mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+                                           / mFramesPerSecond;
+        return AAudioStream_createThread(mStream, nanosPerBurst,
+                                       SimpleAAudioPlayerThreadProc,
+                                       this);
+    }
+
+    // Tell the thread to stop.
+    aaudio_result_t stop() {
+        mEnabled = false;
+        return AAudioStream_joinThread(mStream, nullptr, 2 * AAUDIO_NANOS_PER_SECOND);
+    }
+
+    aaudio_result_t callbackLoop() {
+        int32_t framesWritten = 0;
+        int32_t xRunCount = 0;
+        aaudio_result_t result = AAUDIO_OK;
+
+        result = AAudioStream_requestStart(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+            return result;
+        }
+
+        // Give up after several burst periods have passed.
+        const int burstsPerTimeout = 8;
+        aaudio_nanoseconds_t nanosPerTimeout =
+                        burstsPerTimeout * mFramesPerBurst * AAUDIO_NANOS_PER_SECOND
+                        / mFramesPerSecond;
+
+        while (mEnabled && result >= 0) {
+            // Call application's callback function to fill the buffer.
+            if (mCallbackProc(mOutputBuffer, mFramesPerBurst, mUserContext)) {
+                mEnabled = false;
+            }
+            // if needed, convert from float to int16_t PCM
+            if (mConversionBuffer != nullptr) {
+                int32_t numSamples = mFramesPerBurst * mSamplesPerFrame;
+                for (int i = 0; i < numSamples; i++) {
+                    mConversionBuffer[i] = (int16_t)(32767.0 * mOutputBuffer[i]);
+                }
+                // Write the application data to stream.
+                result = AAudioStream_write(mStream, mConversionBuffer, mFramesPerBurst, nanosPerTimeout);
+            } else {
+                // Write the application data to stream.
+                result = AAudioStream_write(mStream, mOutputBuffer, mFramesPerBurst, nanosPerTimeout);
+            }
+            framesWritten += result;
+            if (result < 0) {
+                fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", result);
+            }
+        }
+
+        result = AAudioStream_getXRunCount(mStream, &xRunCount);
+        printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+        result = AAudioStream_requestStop(mStream);
+        if (result != AAUDIO_OK) {
+            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+            return result;
+        }
+
+        return result;
+    }
+
+private:
+    AAudioStreamBuilder   mBuilder = AAUDIO_HANDLE_INVALID;
+    AAudioStream          mStream = AAUDIO_HANDLE_INVALID;
+    float            *  mOutputBuffer = nullptr;
+    int16_t          *  mConversionBuffer = nullptr;
+
+    audio_callback_proc_t * mCallbackProc = nullptr;
+    void             *  mUserContext = nullptr;
+    aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+    int32_t             mSamplesPerFrame = 0;
+    int32_t             mFramesPerSecond = 0;
+    aaudio_size_frames_t  mFramesPerBurst = 0;
+    aaudio_audio_format_t mDataFormat = AAUDIO_FORMAT_PCM16;
+
+    volatile bool       mEnabled = false; // used to request that callback exit its loop
+};
+
+static void *SimpleAAudioPlayerThreadProc(void *arg) {
+    SimpleAAudioPlayer *player = (SimpleAAudioPlayer *) arg;
+    player->callbackLoop();
+    return nullptr;
+}
+
+// Application data that gets passed to the callback.
+typedef struct SineThreadedData_s {
+    SineGenerator  sineOsc1;
+    SineGenerator  sineOsc2;
+    int32_t        samplesPerFrame = 0;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+int MyCallbackProc(float *outputBuffer, int32_t numFrames, void *userContext) {
+    SineThreadedData_t *data = (SineThreadedData_t *) userContext;
+    // Render sine waves to left and right channels.
+    data->sineOsc1.render(&outputBuffer[0], data->samplesPerFrame, numFrames);
+    if (data->samplesPerFrame > 1) {
+        data->sineOsc2.render(&outputBuffer[1], data->samplesPerFrame, numFrames);
+    }
+    return 0;
+}
+
+int main(int argc, char **argv)
+{
+    (void)argc; // unused
+    SimpleAAudioPlayer player;
+    SineThreadedData_t myData;
+    aaudio_result_t result;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+    printf("%s - Play a sine wave using an AAudio Thread\n", argv[0]);
+
+    result = player.open(MyCallbackProc, &myData);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
+        goto error;
+    }
+    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+    printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+    myData.sineOsc1.setup(440.0, 48000);
+    myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+    myData.sineOsc2.setup(660.0, 48000);
+    myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+    myData.samplesPerFrame = player.getSamplesPerFrame();
+
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.start() returned %d\n", result);
+        goto error;
+    }
+
+    printf("Sleep for %d seconds while audio plays in a background thread.\n", NUM_SECONDS);
+    {
+        // FIXME sleep is not an NDK API
+        // sleep(NUM_SECONDS);
+        const struct timespec request = { .tv_sec = NUM_SECONDS, .tv_nsec = 0 };
+        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+    }
+    printf("Woke up now.\n");
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.stop() returned %d\n", result);
+        goto error;
+    }
+    result = player.close();
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR -  player.close() returned %d\n", result);
+        goto error;
+    }
+
+    printf("SUCCESS\n");
+    return EXIT_SUCCESS;
+error:
+    player.close();
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return EXIT_FAILURE;
+}
+
diff --git a/media/liboboe/examples/write_sine/static/Android.mk b/media/liboboe/examples/write_sine/static/Android.mk
new file mode 100644
index 0000000..7c8d17c
--- /dev/null
+++ b/media/liboboe/examples/write_sine/static/Android.mk
@@ -0,0 +1,34 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := examples
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := liboboe
+
+LOCAL_MODULE := write_sine
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= ../src/write_sine_threaded.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+                          libbinder libcutils libutils \
+                          libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := liboboe
+
+LOCAL_MODULE := write_sine_threaded
+include $(BUILD_EXECUTABLE)
diff --git a/media/liboboe/examples/write_sine/static/README.md b/media/liboboe/examples/write_sine/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/liboboe/examples/write_sine/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/liboboe/include/aaudio/AAudio.h b/media/liboboe/include/aaudio/AAudio.h
new file mode 100644
index 0000000..7324137
--- /dev/null
+++ b/media/liboboe/include/aaudio/AAudio.h
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is the 'C' ABI for AAudio.
+ */
+#ifndef AAUDIO_AAUDIO_H
+#define AAUDIO_AAUDIO_H
+
+#include "AAudioDefinitions.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef aaudio_handle_t AAudioStream;
+typedef aaudio_handle_t AAudioStreamBuilder;
+
+#define AAUDIO_STREAM_NONE         ((AAudioStream)AAUDIO_HANDLE_INVALID)
+#define AAUDIO_STREAM_BUILDER_NONE ((AAudioStreamBuilder)AAUDIO_HANDLE_INVALID)
+
+/* AAUDIO_API will probably get defined in a Makefile for a specific platform. */
+#ifndef AAUDIO_API
+#define AAUDIO_API /* for exporting symbols */
+#endif
+
+// ============================================================
+// Audio System
+// ============================================================
+
+/**
+ * @return time in the same clock domain as the timestamps
+ */
+AAUDIO_API aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid);
+
+/**
+ * The text is the ASCII symbol corresponding to the returnCode,
+ * or an English message saying the returnCode is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio result code.
+ */
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode);
+
+/**
+ * The text is the ASCII symbol corresponding to the stream state,
+ * or an English message saying the state is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio state.
+ */
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state);
+
+// ============================================================
+// StreamBuilder
+// ============================================================
+
+/**
+ * Create a StreamBuilder that can be used to open a Stream.
+ *
+ * The deviceId is initially unspecified, meaning that the current default device will be used.
+ *
+ * The default direction is AAUDIO_DIRECTION_OUTPUT.
+ * The default sharing mode is AAUDIO_SHARING_MODE_LEGACY.
+ * The data format, samplesPerFrame and sampleRate are unspecified and will be
+ * chosen by the device when it is opened.
+ *
+ * AAudioStreamBuilder_delete() must be called when you are done using the builder.
+ */
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder);
+
+/**
+ * Request an audio device identified by an ID.
+ * The ID is platform specific.
+ * On Android, for example, the ID could be obtained from the Java AudioManager.
+ *
+ * By default, the primary device will be used.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param deviceId platform specific identifier or AAUDIO_DEVICE_UNSPECIFIED
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     aaudio_device_id_t deviceId);
+/**
+ * Passes back requested device ID.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
+                                                     aaudio_device_id_t *deviceId);
+
+/**
+ * Request a sample rate in Hz.
+ * The stream may be opened with a different sample rate.
+ * So the application should query for the actual rate after the stream is opened.
+ *
+ * Technically, this should be called the "frame rate" or "frames per second",
+ * because it refers to the number of complete frames transferred per second.
+ * But it is traditionally called "sample rate". So we use that term.
+ *
+ * Default is AAUDIO_UNSPECIFIED.
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                                       aaudio_sample_rate_t sampleRate);
+
+/**
+ * Returns sample rate in Hertz (samples per second).
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
+                                                       aaudio_sample_rate_t *sampleRate);
+
+
+/**
+ * Request a number of samples per frame.
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * Default is AAUDIO_UNSPECIFIED.
+ *
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+                                                   int32_t samplesPerFrame);
+
+/**
+ * Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
+                                                   int32_t *samplesPerFrame);
+
+
+/**
+ * Request a sample data format, for example AAUDIO_FORMAT_PCM16.
+ * The application should query for the actual format after the stream is opened.
+ *
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+                                                   aaudio_audio_format_t format);
+
+/**
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
+                                                   aaudio_audio_format_t *format);
+
+/**
+ * Request a mode for sharing the device.
+ * The requested sharing mode may not be available.
+ * So the application should query for the actual mode after the stream is opened.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+                                                        aaudio_sharing_mode_t sharingMode);
+
+/**
+ * Return requested sharing mode.
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
+                                                        aaudio_sharing_mode_t *sharingMode);
+
+/**
+ * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+                                                      aaudio_direction_t direction);
+
+/**
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param direction pointer to a variable to be set to the currently requested direction.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
+                                                      aaudio_direction_t *direction);
+
+/**
+ * Open a stream based on the options in the StreamBuilder.
+ *
+ * AAudioStream_close must be called when finished with the stream to recover
+ * the memory and to free the associated resources.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream handle
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
+                                                     AAudioStream *stream);
+
+/**
+ * Delete the resources associated with the StreamBuilder.
+ *
+ * @param builder handle provided by AAudio_createStreamBuilder()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder);
+
+// ============================================================
+// Stream Control
+// ============================================================
+
+/**
+ * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream);
+
+/**
+ * Asynchronously request to start playing the stream. For output streams, one should
+ * write to the stream to fill the buffer before starting.
+ * Otherwise it will underflow.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream);
+
+/**
+ * Asynchronous request for the stream to pause.
+ * Pausing a stream will freeze the data flow but not flush any buffers.
+ * Use AAudioStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream);
+
+/**
+ * Asynchronous request for the stream to flush.
+ * Flushing will discard any pending data.
+ * This call only works if the stream is pausing or paused. TODO review
+ * Frame counters are not reset by a flush. They may be advanced.
+ * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream);
+
+/**
+ * Asynchronous request for the stream to stop.
+ * The stream will stop after all of the data currently buffered has been played.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream);
+
+/**
+ * Query the current state, eg. AAUDIO_STREAM_STATE_PAUSING
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param state pointer to a variable that will be set to the current state
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state);
+
+/**
+ * Wait until the current state no longer matches the input state.
+ *
+ * <pre><code>
+ * aaudio_stream_state_t currentState;
+ * aaudio_result_t result = AAudioStream_getState(stream, &currentState);
+ * while (result == AAUDIO_OK && currentState != AAUDIO_STREAM_STATE_PAUSING) {
+ *     result = AAudioStream_waitForStateChange(
+ *                                   stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * }
+ * </code></pre>
+ *
+ * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param inputState The state we want to avoid.
+ * @param nextState Pointer to a variable that will be set to the new state.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
+                                            aaudio_stream_state_t inputState,
+                                            aaudio_stream_state_t *nextState,
+                                            aaudio_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream I/O
+// ============================================================
+
+/**
+ * Read data from the stream.
+ *
+ * The call will wait until the read is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for data.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to read. Only complete frames will be read.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually read or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
+                               void *buffer,
+                               aaudio_size_frames_t numFrames,
+                               aaudio_nanoseconds_t timeoutNanoseconds);
+
+/**
+ * Write data to the stream.
+ *
+ * The call will wait until the write is complete or until it runs out of time.
+ * If timeoutNanos is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for room in the buffer.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to write. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually written or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
+                               const void *buffer,
+                               aaudio_size_frames_t numFrames,
+                               aaudio_nanoseconds_t timeoutNanoseconds);
+
+
+// ============================================================
+// High priority audio threads
+// ============================================================
+
+typedef void *(aaudio_audio_thread_proc_t)(void *);
+
+/**
+ * Create a thread associated with a stream. The thread has special properties for
+ * low latency audio performance. This thread can be used to implement a callback API.
+ *
+ * Only one thread may be associated with a stream.
+ *
+ * Note that this API is in flux.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
+ * @param threadProc your thread entry point
+ * @param arg an argument that will be passed to your thread entry point
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
+                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     aaudio_audio_thread_proc_t *threadProc,
+                                     void *arg);
+
+/**
+ * Wait until the thread exits or an error occurs.
+ * The thread handle will be deleted.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param returnArg a pointer to a variable to receive the return value
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
+                                   void **returnArg,
+                                   aaudio_nanoseconds_t timeoutNanoseconds);
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+
+/**
+ * This can be used to adjust the latency of the buffer by changing
+ * the threshold where blocking will occur.
+ * By combining this with AAudioStream_getXRunCount(), the latency can be tuned
+ * at run-time for each device.
+ *
+ * This cannot be set higher than AAudioStream_getBufferCapacity().
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param actualFrames receives final number of frames
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
+                                                aaudio_size_frames_t requestedFrames,
+                                                aaudio_size_frames_t *actualFrames);
+
+/**
+ * Query the maximum number of frames that can be filled without blocking.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer size
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream, aaudio_size_frames_t *frames);
+
+/**
+ * Query the number of frames that are read or written by the endpoint at one time.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the burst size
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream, aaudio_size_frames_t *frames);
+
+/**
+ * Query maximum buffer capacity in frames.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the buffer capacity
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream, aaudio_size_frames_t *frames);
+
+/**
+ * An XRun is an Underrun or an Overrun.
+ * During playing, an underrun will occur if the stream is not written in time
+ * and the system runs out of valid data.
+ * During recording, an overrun will occur if the stream is not read in time
+ * and there is no place to put the incoming data so it is discarded.
+ *
+ * An underrun or overrun can cause an audible "pop" or "glitch".
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param xRunCount pointer to variable to receive the underrun or overrun count
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param sampleRate pointer to variable to receive the actual sample rate
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream, aaudio_sample_rate_t *sampleRate);
+
+/**
+ * The samplesPerFrame is also known as channelCount.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param samplesPerFrame pointer to variable to receive the actual samples per frame
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream, int32_t *samplesPerFrame);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param deviceId pointer to variable to receive the actual device ID
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream, aaudio_device_id_t *deviceId);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param format pointer to variable to receive the actual data format
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream, aaudio_audio_format_t *format);
+
+/**
+ * Provide actual sharing mode.
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param sharingMode pointer to variable to receive the actual sharing mode
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
+                                        aaudio_sharing_mode_t *sharingMode);
+
+/**
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param direction pointer to a variable to be set to the current direction.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream, aaudio_direction_t *direction);
+
+/**
+ * Passes back the number of frames that have been written since the stream was created.
+ * For an output stream, this will be advanced by the application calling write().
+ * For an input stream, this will be advanced by the device or service.
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames written
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
+                                                   aaudio_position_frames_t *frames);
+
+/**
+ * Passes back the number of frames that have been read since the stream was created.
+ * For an output stream, this will be advanced by the device or service.
+ * For an input stream, this will be advanced by the application calling read().
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream handle provided by AAudioStreamBuilder_openStream()
+ * @param frames pointer to variable to receive the frames read
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream, aaudio_position_frames_t *frames);
+
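As an aside (not from the patch), the two counters above can be combined with the sample-rate query earlier in this header to estimate how much audio is still queued in an output stream; a hedged sketch:

#include <aaudio/AAudio.h>

// Hedged sketch: frames still buffered for playback, expressed as seconds of latency.
static double estimateOutputLatencySeconds(AAudioStream stream) {
    aaudio_position_frames_t written = 0;
    aaudio_position_frames_t read = 0;
    aaudio_sample_rate_t rate = 0;

    if (AAudioStream_getFramesWritten(stream, &written) != AAUDIO_OK ||
        AAudioStream_getFramesRead(stream, &read) != AAUDIO_OK ||
        AAudioStream_getSampleRate(stream, &rate) != AAUDIO_OK || rate <= 0) {
        return -1.0; // a query failed
    }
    // For an output stream the app advances "written" and the device advances "read",
    // so the difference approximates the frames waiting to be played.
    return (double)(written - read) / (double)rate;
}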
+/**
+ * Passes back the time at which a particular frame was presented.
+ * This can be used to synchronize audio with video or MIDI.
+ * It can also be used to align a recorded stream with a playback stream.
+ *
+ * Timestamps are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+ * AAUDIO_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Note that because requestStart() is asynchronous, timestamps will not be valid until
+ * a short time after calling requestStart().
+ * So AAUDIO_ERROR_INVALID_STATE should not be considered a fatal error.
+ * Just try calling again later.
+ *
+ * If an error occurs, then the position and time will not be modified.
+ *
+ * The position and time passed back are monotonically increasing.
+ *
+ * @param stream A handle provided by AAudioStreamBuilder_openStream()
+ * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
+ * @param framePosition pointer to a variable to receive the position
+ * @param timeNanoseconds pointer to a variable to receive the time
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
+                                      aaudio_clockid_t clockid,
+                                      aaudio_position_frames_t *framePosition,
+                                      aaudio_nanoseconds_t *timeNanoseconds);
+
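A hedged usage sketch (not part of the patch) for the timestamp query above: it extrapolates the current presentation position from the last reported frame/time pair and simply treats AAUDIO_ERROR_INVALID_STATE as "ask again later". AAudio_getNanoseconds() is assumed to be declared earlier in this header with the same shape as the Oboe function it replaces.

#include <aaudio/AAudio.h>

// Hedged sketch: estimate where playback is "now" from the most recent timestamp.
static aaudio_result_t currentPresentationPosition(AAudioStream stream,
                                                   aaudio_sample_rate_t sampleRate,
                                                   aaudio_position_frames_t *positionNow) {
    aaudio_position_frames_t framePosition = 0;
    aaudio_nanoseconds_t frameTime = 0;
    aaudio_result_t result = AAudioStream_getTimestamp(stream, AAUDIO_CLOCK_MONOTONIC,
                                                       &framePosition, &frameTime);
    if (result != AAUDIO_OK) {
        return result; // INVALID_STATE just means the stream has not finished starting
    }
    aaudio_nanoseconds_t elapsed = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC) - frameTime;
    *positionNow = framePosition + (elapsed * sampleRate) / AAUDIO_NANOS_PER_SECOND;
    return AAUDIO_OK;
}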
+#ifdef __cplusplus
+}
+#endif
+
+#endif //AAUDIO_AAUDIO_H
diff --git a/media/liboboe/include/aaudio/AAudioDefinitions.h b/media/liboboe/include/aaudio/AAudioDefinitions.h
new file mode 100644
index 0000000..979b8c9
--- /dev/null
+++ b/media/liboboe/include/aaudio/AAudioDefinitions.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIODEFINITIONS_H
+#define AAUDIO_AAUDIODEFINITIONS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int32_t  aaudio_handle_t; // negative handles are error codes
+typedef int32_t  aaudio_result_t;
+/**
+ * A platform specific identifier for a device.
+ */
+typedef int32_t  aaudio_device_id_t;
+typedef int32_t  aaudio_sample_rate_t;
+/** This is used for small quantities such as the number of frames in a buffer. */
+typedef int32_t  aaudio_size_frames_t;
+/** This is used for small quantities such as the number of bytes in a frame. */
+typedef int32_t  aaudio_size_bytes_t;
+/**
+ * This is used for large quantities, such as the number of frames that have
+ * been played since a stream was started.
+ * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
+ */
+typedef int64_t  aaudio_position_frames_t;
+
+typedef int64_t  aaudio_nanoseconds_t;
+
+/**
+ * This is used to represent a value that has not been specified.
+ * For example, an application could use AAUDIO_UNSPECIFIED to indicate
+ * that it did not care what the specific value of a parameter was
+ * and would accept whatever it was given.
+ */
+#define AAUDIO_UNSPECIFIED           0
+#define AAUDIO_DEVICE_UNSPECIFIED    ((aaudio_device_id_t) -1)
+#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
+#define AAUDIO_MILLIS_PER_SECOND     1000
+#define AAUDIO_NANOS_PER_SECOND      (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
+
+#define AAUDIO_HANDLE_INVALID     ((aaudio_handle_t)-1)
+
+enum aaudio_direction_t {
+    AAUDIO_DIRECTION_OUTPUT,
+    AAUDIO_DIRECTION_INPUT,
+    AAUDIO_DIRECTION_COUNT // This should always be last.
+};
+
+enum aaudio_audio_format_t {
+    AAUDIO_FORMAT_INVALID = -1,
+    AAUDIO_FORMAT_UNSPECIFIED = 0,
+    AAUDIO_FORMAT_PCM_I16,
+    AAUDIO_FORMAT_PCM_FLOAT,
+    AAUDIO_FORMAT_PCM_I8_24,
+    AAUDIO_FORMAT_PCM_I32
+};
+
+// TODO These are deprecated. Remove these aliases once all references are replaced.
+#define AAUDIO_FORMAT_PCM16    AAUDIO_FORMAT_PCM_I16
+#define AAUDIO_FORMAT_PCM824   AAUDIO_FORMAT_PCM_I8_24
+#define AAUDIO_FORMAT_PCM32    AAUDIO_FORMAT_PCM_I32
+
+enum {
+    AAUDIO_OK,
+    AAUDIO_ERROR_BASE = -900, // TODO review
+    AAUDIO_ERROR_DISCONNECTED,
+    AAUDIO_ERROR_ILLEGAL_ARGUMENT,
+    AAUDIO_ERROR_INCOMPATIBLE,
+    AAUDIO_ERROR_INTERNAL, // an underlying API returned an error code
+    AAUDIO_ERROR_INVALID_STATE,
+    AAUDIO_ERROR_UNEXPECTED_STATE,
+    AAUDIO_ERROR_UNEXPECTED_VALUE,
+    AAUDIO_ERROR_INVALID_HANDLE,
+    AAUDIO_ERROR_INVALID_QUERY,
+    AAUDIO_ERROR_UNIMPLEMENTED,
+    AAUDIO_ERROR_UNAVAILABLE,
+    AAUDIO_ERROR_NO_FREE_HANDLES,
+    AAUDIO_ERROR_NO_MEMORY,
+    AAUDIO_ERROR_NULL,
+    AAUDIO_ERROR_TIMEOUT,
+    AAUDIO_ERROR_WOULD_BLOCK,
+    AAUDIO_ERROR_INVALID_ORDER,
+    AAUDIO_ERROR_OUT_OF_RANGE,
+    AAUDIO_ERROR_NO_SERVICE
+};
+
+typedef enum {
+    AAUDIO_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
+    AAUDIO_CLOCK_BOOTTIME,  // Clock since booted, runs all the time.
+    AAUDIO_CLOCK_COUNT // This should always be last.
+} aaudio_clockid_t;
+
+typedef enum
+{
+    AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+    AAUDIO_STREAM_STATE_OPEN,
+    AAUDIO_STREAM_STATE_STARTING,
+    AAUDIO_STREAM_STATE_STARTED,
+    AAUDIO_STREAM_STATE_PAUSING,
+    AAUDIO_STREAM_STATE_PAUSED,
+    AAUDIO_STREAM_STATE_FLUSHING,
+    AAUDIO_STREAM_STATE_FLUSHED,
+    AAUDIO_STREAM_STATE_STOPPING,
+    AAUDIO_STREAM_STATE_STOPPED,
+    AAUDIO_STREAM_STATE_CLOSING,
+    AAUDIO_STREAM_STATE_CLOSED,
+} aaudio_stream_state_t;
+
+// TODO review API
+typedef enum {
+    /**
+     * This will use an AudioTrack object for playing audio
+     * and an AudioRecord for recording data.
+     */
+    AAUDIO_SHARING_MODE_LEGACY,
+    /**
+     * This will be the only stream using a particular source or sink.
+     * This mode will provide the lowest possible latency.
+     * You should close EXCLUSIVE streams immediately when you are not using them.
+     */
+    AAUDIO_SHARING_MODE_EXCLUSIVE,
+    /**
+     * Multiple applications will be mixed by the AAudio Server.
+     * This will have higher latency than the EXCLUSIVE mode.
+     */
+    AAUDIO_SHARING_MODE_SHARED,
+    /**
+     * Multiple applications will do their own mixing into a memory mapped buffer.
+     * It may be possible for malicious applications to read the data produced by
+     * other apps. So do not use this for private data such as telephony or messaging.
+     */
+    AAUDIO_SHARING_MODE_PUBLIC_MIX,
+    AAUDIO_SHARING_MODE_COUNT // This should always be last.
+} aaudio_sharing_mode_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AAUDIO_AAUDIODEFINITIONS_H
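The sharing-mode enum above notes that the requested mode is only a hint. A hedged sketch, separate from the patch, of requesting the low-latency EXCLUSIVE mode and then asking what was actually granted; the builder functions are assumed to keep the shape of the Oboe versions they replace (see the renamed symbols in liboboe.map.txt further down).

#include <aaudio/AAudio.h>

// Hedged sketch: request EXCLUSIVE, then query the sharing mode actually in effect.
static aaudio_result_t openWithExclusiveRequest(AAudioStream *streamOut,
                                                aaudio_sharing_mode_t *actualMode) {
    AAudioStreamBuilder builder;   // assumed typedef, mirroring OboeStreamBuilder
    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
    if (result != AAUDIO_OK) return result;

    AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_EXCLUSIVE);
    result = AAudioStreamBuilder_openStream(builder, streamOut);
    if (result == AAUDIO_OK) {
        // The request may not be honored; query the mode the stream actually uses.
        AAudioStream_getSharingMode(*streamOut, actualMode);
    }
    AAudioStreamBuilder_delete(builder);
    return result;
}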
diff --git a/media/liboboe/include/oboe/NOTICE b/media/liboboe/include/aaudio/NOTICE
similarity index 100%
rename from media/liboboe/include/oboe/NOTICE
rename to media/liboboe/include/aaudio/NOTICE
diff --git a/media/liboboe/include/oboe/README.md b/media/liboboe/include/aaudio/README.md
similarity index 69%
rename from media/liboboe/include/oboe/README.md
rename to media/liboboe/include/aaudio/README.md
index de60d03..8c4ae51 100644
--- a/media/liboboe/include/oboe/README.md
+++ b/media/liboboe/include/aaudio/README.md
@@ -1,4 +1,4 @@
-Oboe Audio headers
+AAudio Audio headers
 
 This folder contains the public header files.
 
diff --git a/media/liboboe/include/oboe/OboeAudio.h b/media/liboboe/include/oboe/OboeAudio.h
deleted file mode 100644
index 2181b8c..0000000
--- a/media/liboboe/include/oboe/OboeAudio.h
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This is the 'C' ABI for Oboe.
- */
-#ifndef OBOE_OBOEAUDIO_H
-#define OBOE_OBOEAUDIO_H
-
-#include "OboeDefinitions.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef int32_t OboeDeviceId;
-typedef oboe_handle_t OboeStream;
-typedef oboe_handle_t OboeStreamBuilder;
-
-#define OBOE_STREAM_NONE         ((OboeStream)OBOE_HANDLE_INVALID)
-#define OBOE_STREAM_BUILDER_NONE ((OboeStreamBuilder)OBOE_HANDLE_INVALID)
-
-/* OBOE_API will probably get defined in a Makefile for a specific platform. */
-#ifndef OBOE_API
-#define OBOE_API /* for exporting symbols */
-#endif
-
-// ============================================================
-// Audio System
-// ============================================================
-
-/**
- * @return time in the same clock domain as the timestamps
- */
-OBOE_API oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid);
-
-/**
- * The text is the ASCII symbol corresponding to the returnCode,
- * or an English message saying the returnCode is unrecognized.
- * This is intended for developers to use when debugging.
- * It is not for display to users.
- *
- * @return pointer to a text representation of an Oboe result code.
- */
-OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode);
-
-/**
- * The text is the ASCII symbol corresponding to the stream state,
- * or an English message saying the state is unrecognized.
- * This is intended for developers to use when debugging.
- * It is not for display to users.
- *
- * @return pointer to a text representation of an Oboe state.
- */
-OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state);
-
-// ============================================================
-// StreamBuilder
-// ============================================================
-
-/**
- * Create a StreamBuilder that can be used to open a Stream.
- *
- * The deviceId is initially unspecified, meaning that the current default device will be used.
- *
- * The default direction is OBOE_DIRECTION_OUTPUT.
- * The default sharing mode is OBOE_SHARING_MODE_LEGACY.
- * The data format, samplesPerFrames and sampleRate are unspecified and will be
- * chosen by the device when it is opened.
- *
- * OboeStreamBuilder_delete() must be called when you are done using the builder.
- */
-OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder);
-
-/**
- * Request an audio device identified device using an ID.
- * The ID is platform specific.
- * On Android, for example, the ID could be obtained from the Java AudioManager.
- *
- * By default, the primary device will be used.
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
-                                                     OboeDeviceId deviceId);
-
-/**
- * Request a sample rate in Hz.
- * The stream may be opened with a different sample rate.
- * So the application should query for the actual rate after the stream is opened.
- *
- * Technically, this should be called the "frame rate" or "frames per second",
- * because it refers to the number of complete frames transferred per second.
- * But it is traditionally called "sample rate". Se we use that term.
- *
- * Default is OBOE_UNSPECIFIED.
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t sampleRate);
-
-/**
- * Returns sample rate in Hertz (samples per second).
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t *sampleRate);
-
-
-/**
- * Request a number of samples per frame.
- * The stream may be opened with a different value.
- * So the application should query for the actual value after the stream is opened.
- *
- * Default is OBOE_UNSPECIFIED.
- *
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
-                                                   int32_t samplesPerFrame);
-
-/**
- * Note, this quantity is sometimes referred to as "channel count".
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param samplesPerFrame pointer to a variable to be set to samplesPerFrame.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
-                                                   int32_t *samplesPerFrame);
-
-
-/**
- * Request a sample data format, for example OBOE_AUDIO_FORMAT_PCM16.
- * The application should query for the actual format after the stream is opened.
- *
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
-                                                   oboe_audio_format_t format);
-
-/**
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
-                                                   oboe_audio_format_t *format);
-
-/**
- * Request a mode for sharing the device.
- * The requested sharing mode may not be available.
- * So the application should query for the actual mode after the stream is opened.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param sharingMode OBOE_SHARING_MODE_LEGACY or OBOE_SHARING_MODE_EXCLUSIVE
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
-                                                        oboe_sharing_mode_t sharingMode);
-
-/**
- * Return requested sharing mode.
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
-                                                        oboe_sharing_mode_t *sharingMode);
-
-/**
- * Request the direction for a stream. The default is OBOE_DIRECTION_OUTPUT.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param direction OBOE_DIRECTION_OUTPUT or OBOE_DIRECTION_INPUT
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
-                                                      oboe_direction_t direction);
-
-/**
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param direction pointer to a variable to be set to the currently requested direction.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
-                                                      oboe_direction_t *direction);
-
-/**
- * Open a stream based on the options in the StreamBuilder.
- *
- * OboeStream_close must be called when finished with the stream to recover
- * the memory and to free the associated resources.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @param stream pointer to a variable to receive the new stream handle
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStreamBuilder_openStream(OboeStreamBuilder builder,
-                                                     OboeStream *stream);
-
-/**
- * Delete the resources associated with the StreamBuilder.
- *
- * @param builder handle provided by Oboe_createStreamBuilder()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStreamBuilder_delete(OboeStreamBuilder builder);
-
-// ============================================================
-// Stream Control
-// ============================================================
-
-/**
- * Free the resources associated with a stream created by OboeStreamBuilder_openStream()
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStream_close(OboeStream stream);
-
-/**
- * Asynchronously request to start playing the stream. For output streams, one should
- * write to the stream to fill the buffer before starting.
- * Otherwise it will underflow.
- * After this call the state will be in OBOE_STREAM_STATE_STARTING or OBOE_STREAM_STATE_STARTED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStream_requestStart(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to pause.
- * Pausing a stream will freeze the data flow but not flush any buffers.
- * Use OboeStream_Start() to resume playback after a pause.
- * After this call the state will be in OBOE_STREAM_STATE_PAUSING or OBOE_STREAM_STATE_PAUSED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStream_requestPause(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to flush.
- * Flushing will discard any pending data.
- * This call only works if the stream is pausing or paused. TODO review
- * Frame counters are not reset by a flush. They may be advanced.
- * After this call the state will be in OBOE_STREAM_STATE_FLUSHING or OBOE_STREAM_STATE_FLUSHED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStream_requestFlush(OboeStream stream);
-
-/**
- * Asynchronous request for the stream to stop.
- * The stream will stop after all of the data currently buffered has been played.
- * After this call the state will be in OBOE_STREAM_STATE_STOPPING or OBOE_STREAM_STATE_STOPPED.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t  OboeStream_requestStop(OboeStream stream);
-
-/**
- * Query the current state, eg. OBOE_STREAM_STATE_PAUSING
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param state pointer to a variable that will be set to the current state
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state);
-
-/**
- * Wait until the current state no longer matches the input state.
- *
- * <pre><code>
- * oboe_stream_state_t currentState;
- * oboe_result_t result = OboeStream_getState(stream, &currentState);
- * while (result == OBOE_OK && currentState != OBOE_STREAM_STATE_PAUSING) {
- *     result = OboeStream_waitForStateChange(
- *                                   stream, currentState, &currentState, MY_TIMEOUT_NANOS);
- * }
- * </code></pre>
- *
- * @param stream A handle provided by OboeStreamBuilder_openStream()
- * @param inputState The state we want to avoid.
- * @param nextState Pointer to a variable that will be set to the new state.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
-                                            oboe_stream_state_t inputState,
-                                            oboe_stream_state_t *nextState,
-                                            oboe_nanoseconds_t timeoutNanoseconds);
-
-// ============================================================
-// Stream I/O
-// ============================================================
-
-/**
- * Read data from the stream.
- *
- * The call will wait until the read is complete or until it runs out of time.
- * If timeoutNanos is zero then this call will not wait.
- *
- * Note that timeoutNanoseconds is a relative duration in wall clock time.
- * Time will not stop if the thread is asleep.
- * So it will be implemented using CLOCK_BOOTTIME.
- *
- * This call is "strong non-blocking" unless it has to wait for data.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param buffer The address of the first sample.
- * @param numFrames Number of frames to read. Only complete frames will be written.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually written or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
-                               void *buffer,
-                               oboe_size_frames_t numFrames,
-                               oboe_nanoseconds_t timeoutNanoseconds);
-
-/**
- * Write data to the stream.
- *
- * The call will wait until the write is complete or until it runs out of time.
- * If timeoutNanos is zero then this call will not wait.
- *
- * Note that timeoutNanoseconds is a relative duration in wall clock time.
- * Time will not stop if the thread is asleep.
- * So it will be implemented using CLOCK_BOOTTIME.
- *
- * This call is "strong non-blocking" unless it has to wait for room in the buffer.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param buffer The address of the first sample.
- * @param numFrames Number of frames to write. Only complete frames will be written.
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return The number of frames actually written or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
-                               const void *buffer,
-                               oboe_size_frames_t numFrames,
-                               oboe_nanoseconds_t timeoutNanoseconds);
-
-
-// ============================================================
-// High priority audio threads
-// ============================================================
-
-/**
- * Create a thread associated with a stream. The thread has special properties for
- * low latency audio performance. This thread can be used to implement a callback API.
- *
- * Only one thread may be associated with a stream.
- *
- * Note that this API is in flux.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param periodNanoseconds the estimated period at which the audio thread will need to wake up
- * @param startRoutine your thread entry point
- * @param arg an argument that will be passed to your thread entry point
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
-                                     oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg);
-
-/**
- * Wait until the thread exits or an error occurs.
- * The thread handle will be deleted.
- *
- * @param stream A stream created using OboeStreamBuilder_openStream().
- * @param returnArg a pointer to a variable to receive the return value
- * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
-                                   void **returnArg,
-                                   oboe_nanoseconds_t timeoutNanoseconds);
-
-// ============================================================
-// Stream - queries
-// ============================================================
-
-
-/**
- * This can be used to adjust the latency of the buffer by changing
- * the threshold where blocking will occur.
- * By combining this with OboeStream_getUnderrunCount(), the latency can be tuned
- * at run-time for each device.
- *
- * This cannot be set higher than OboeStream_getBufferCapacity().
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param requestedFrames requested number of frames that can be filled without blocking
- * @param actualFrames receives final number of frames
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
-                                                oboe_size_frames_t requestedFrames,
-                                                oboe_size_frames_t *actualFrames);
-
-/**
- * Query the maximum number of frames that can be filled without blocking.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer size
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * Query the number of frames that are read or written by the endpoint at one time.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the burst size
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * Query maximum buffer capacity in frames.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the buffer capacity
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream, oboe_size_frames_t *frames);
-
-/**
- * An XRun is an Underrun or an Overrun.
- * During playing, an underrun will occur if the stream is not written in time
- * and the system runs out of valid data.
- * During recording, an overrun will occur if the stream is not read in time
- * and there is no place to put the incoming data so it is discarded.
- *
- * An underrun or overrun can cause an audible "pop" or "glitch".
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param xRunCount pointer to variable to receive the underrun or overrun count
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param sampleRate pointer to variable to receive the actual sample rate
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate);
-
-/**
- * The samplesPerFrame is also known as channelCount.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param samplesPerFrame pointer to variable to receive the actual samples per frame
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param format pointer to variable to receive the actual data format
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format);
-
-/**
- * Provide actual sharing mode.
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param sharingMode pointer to variable to receive the actual sharing mode
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
-                                        oboe_sharing_mode_t *sharingMode);
-
-/**
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param direction pointer to a variable to be set to the current direction.
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, oboe_direction_t *direction);
-
-/**
- * Passes back the number of frames that have been written since the stream was created.
- * For an output stream, this will be advanced by the application calling write().
- * For an input stream, this will be advanced by the device or service.
- *
- * The frame position is monotonically increasing.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
-                                                   oboe_position_frames_t *frames);
-
-/**
- * Passes back the number of frames that have been read since the stream was created.
- * For an output stream, this will be advanced by the device or service.
- * For an input stream, this will be advanced by the application calling read().
- *
- * The frame position is monotonically increasing.
- *
- * @param stream handle provided by OboeStreamBuilder_openStream()
- * @param frames pointer to variable to receive the frames written
- * @return OBOE_OK or a negative error.
- */
-OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames);
-
-/**
- * Passes back the time at which a particular frame was presented.
- * This can be used to synchronize audio with video or MIDI.
- * It can also be used to align a recorded stream with a playback stream.
- *
- * Timestamps are only valid when the stream is in OBOE_STREAM_STATE_STARTED.
- * OBOE_ERROR_INVALID_STATE will be returned if the stream is not started.
- * Note that because requestStart() is asynchronous, timestamps will not be valid until
- * a short time after calling requestStart().
- * So OBOE_ERROR_INVALID_STATE should not be considered a fatal error.
- * Just try calling again later.
- *
- * If an error occurs, then the position and time will not be modified.
- *
- * The position and time passed back are monotonically increasing.
- *
- * @param stream A handle provided by OboeStreamBuilder_openStream()
- * @param clockid OBOE_CLOCK_MONOTONIC or OBOE_CLOCK_BOOTTIME
- * @param framePosition pointer to a variable to receive the position
- * @param timeNanoseconds pointer to a variable to receive the time
- * @return OBOE_OK or a negative error
- */
-OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
-                                      oboe_clockid_t clockid,
-                                      oboe_position_frames_t *framePosition,
-                                      oboe_nanoseconds_t *timeNanoseconds);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //NATIVEOBOE_OBOEAUDIO_H
diff --git a/media/liboboe/include/oboe/OboeDefinitions.h b/media/liboboe/include/oboe/OboeDefinitions.h
deleted file mode 100644
index d80c958..0000000
--- a/media/liboboe/include/oboe/OboeDefinitions.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OBOE_OBOEDEFINITIONS_H
-#define OBOE_OBOEDEFINITIONS_H
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef int32_t  oboe_handle_t; // negative handles are error codes
-typedef int32_t  oboe_result_t;
-typedef int32_t  oboe_sample_rate_t;
-/** This is used for small quantities such as the number of frames in a buffer. */
-typedef int32_t  oboe_size_frames_t;
-/** This is used for small quantities such as the number of bytes in a frame. */
-typedef int32_t  oboe_size_bytes_t;
-/**
- * This is used for large quantities, such as the number of frames that have
- * been played since a stream was started.
- * At 48000 Hz, a 32-bit integer would wrap around in just over 12 hours.
- */
-typedef int64_t  oboe_position_frames_t;
-
-typedef int64_t  oboe_nanoseconds_t;
-typedef uint32_t oboe_audio_format_t;
-
-/**
- * This is used to represent a value that has not been specified.
- * For example, an application could use OBOE_UNSPECIFIED to indicate
- * that is did not not care what the specific value of a parameter was
- * and would accept whatever it was given.
- */
-#define OBOE_UNSPECIFIED           0
-#define OBOE_NANOS_PER_MICROSECOND ((int64_t)1000)
-#define OBOE_NANOS_PER_MILLISECOND (OBOE_NANOS_PER_MICROSECOND * 1000)
-#define OBOE_MILLIS_PER_SECOND     1000
-#define OBOE_NANOS_PER_SECOND      (OBOE_NANOS_PER_MILLISECOND * OBOE_MILLIS_PER_SECOND)
-
-#define OBOE_HANDLE_INVALID     ((oboe_handle_t)-1)
-
-enum oboe_direction_t {
-    OBOE_DIRECTION_OUTPUT,
-    OBOE_DIRECTION_INPUT,
-    OBOE_DIRECTION_COUNT // This should always be last.
-};
-
-enum oboe_datatype_t {
-    OBOE_AUDIO_DATATYPE_INT16,
-    OBOE_AUDIO_DATATYPE_INT32,
-    OBOE_AUDIO_DATATYPE_INT824,
-    OBOE_AUDIO_DATATYPE_UINT8,
-    OBOE_AUDIO_DATATYPE_FLOAT32, // Add new values below.
-    OBOE_AUDIO_DATATYPE_COUNT // This should always be last.
-};
-
-enum oboe_content_t {
-    OBOE_AUDIO_CONTENT_PCM,
-    OBOE_AUDIO_CONTENT_MP3,
-    OBOE_AUDIO_CONTENT_AAC,
-    OBOE_AUDIO_CONTENT_AC3,
-    OBOE_AUDIO_CONTENT_EAC3,
-    OBOE_AUDIO_CONTENT_DTS,
-    OBOE_AUDIO_CONTENT_DTSHD, // Add new values below.
-    OBOE_AUDIO_CONTENT_COUNT // This should always be last.
-};
-
-enum oboe_wrapper_t {
-    OBOE_AUDIO_WRAPPER_NONE,
-    OBOE_AUDIO_WRAPPER_IEC61937, // Add new values below.
-    OBOE_AUDIO_WRAPPER_COUNT // This should always be last.
-};
-
-/**
- * Fields packed into oboe_audio_format_t, from most to least significant bits.
- *   Invalid:1
- *   Reserved:7
- *   Wrapper:8
- *   Content:8
- *   Data Type:8
- */
-#define OBOE_AUDIO_FORMAT(dataType, content, wrapper) \
-    ((oboe_audio_format_t)((wrapper << 16) | (content << 8) | dataType))
-
-#define OBOE_AUDIO_FORMAT_RAW(dataType, content) \
-                OBOE_AUDIO_FORMAT(dataType, content, OBOE_AUDIO_WRAPPER_NONE)
-
-#define OBOE_AUDIO_FORMAT_DATA_TYPE(format) \
-    ((oboe_datatype_t)(format & 0x0FF))
-
-// Define some common formats.
-#define OBOE_AUDIO_FORMAT_PCM16  \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT16, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM_FLOAT \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_FLOAT32, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM824 \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT824, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM32 \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT32, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_INVALID ((oboe_audio_format_t)-1)
-
-enum {
-    OBOE_OK,
-    OBOE_ERROR_BASE = -900, // TODO review
-    OBOE_ERROR_DISCONNECTED,
-    OBOE_ERROR_ILLEGAL_ARGUMENT,
-    OBOE_ERROR_INCOMPATIBLE,
-    OBOE_ERROR_INTERNAL, // an underlying API returned an error code
-    OBOE_ERROR_INVALID_STATE,
-    OBOE_ERROR_UNEXPECTED_STATE,
-    OBOE_ERROR_UNEXPECTED_VALUE,
-    OBOE_ERROR_INVALID_HANDLE,
-    OBOE_ERROR_INVALID_QUERY,
-    OBOE_ERROR_UNIMPLEMENTED,
-    OBOE_ERROR_UNAVAILABLE,
-    OBOE_ERROR_NO_FREE_HANDLES,
-    OBOE_ERROR_NO_MEMORY,
-    OBOE_ERROR_NULL,
-    OBOE_ERROR_TIMEOUT,
-    OBOE_ERROR_WOULD_BLOCK,
-    OBOE_ERROR_INVALID_ORDER,
-    OBOE_ERROR_OUT_OF_RANGE
-};
-
-typedef enum {
-    OBOE_CLOCK_MONOTONIC, // Clock since booted, pauses when CPU is sleeping.
-    OBOE_CLOCK_BOOTTIME,  // Clock since booted, runs all the time.
-    OBOE_CLOCK_COUNT // This should always be last.
-} oboe_clockid_t;
-
-typedef enum
-{
-    OBOE_STREAM_STATE_UNINITIALIZED = 0,
-    OBOE_STREAM_STATE_OPEN,
-    OBOE_STREAM_STATE_STARTING,
-    OBOE_STREAM_STATE_STARTED,
-    OBOE_STREAM_STATE_PAUSING,
-    OBOE_STREAM_STATE_PAUSED,
-    OBOE_STREAM_STATE_FLUSHING,
-    OBOE_STREAM_STATE_FLUSHED,
-    OBOE_STREAM_STATE_STOPPING,
-    OBOE_STREAM_STATE_STOPPED,
-    OBOE_STREAM_STATE_CLOSING,
-    OBOE_STREAM_STATE_CLOSED,
-} oboe_stream_state_t;
-
-// TODO review API
-typedef enum {
-    /**
-     * This will use an AudioTrack object for playing audio
-     * and an AudioRecord for recording data.
-     */
-    OBOE_SHARING_MODE_LEGACY,
-    /**
-     * This will be the only stream using a particular source or sink.
-     * This mode will provide the lowest possible latency.
-     * You should close EXCLUSIVE streams immediately when you are not using them.
-     */
-    OBOE_SHARING_MODE_EXCLUSIVE,
-    /**
-     * Multiple applications will be mixed by the Oboe Server.
-     * This will have higher latency than the EXCLUSIVE mode.
-     */
-    OBOE_SHARING_MODE_SHARED,
-    /**
-     * Multiple applications will do their own mixing into a memory mapped buffer.
-     * It may be possible for malicious applications to read the data produced by
-     * other apps. So do not use this for private data such as telephony or messaging.
-     */
-    OBOE_SHARING_MODE_PUBLIC_MIX,
-    OBOE_SHARING_MODE_COUNT // This should always be last.
-} oboe_sharing_mode_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // OBOE_OBOEDEFINITIONS_H
diff --git a/media/liboboe/liboboe.map.txt b/media/liboboe/liboboe.map.txt
index 9be7fe1..ecae991 100644
--- a/media/liboboe/liboboe.map.txt
+++ b/media/liboboe/liboboe.map.txt
@@ -1,46 +1,46 @@
-LIBOBOE {
+LIBAAUDIO {
   global:
-    Oboe_getNanoseconds;
-    Oboe_convertResultToText;
-    Oboe_convertStreamStateToText;
-    Oboe_createStreamBuilder;
-    OboeStreamBuilder_setDeviceId;
-    OboeStreamBuilder_setSampleRate;
-    OboeStreamBuilder_getSampleRate;
-    OboeStreamBuilder_setSamplesPerFrame;
-    OboeStreamBuilder_getSamplesPerFrame;
-    OboeStreamBuilder_setFormat;
-    OboeStreamBuilder_getFormat;
-    OboeStreamBuilder_setSharingMode;
-    OboeStreamBuilder_getSharingMode;
-    OboeStreamBuilder_setDirection;
-    OboeStreamBuilder_getDirection;
-    OboeStreamBuilder_openStream;
-    OboeStreamBuilder_delete;
-    OboeStream_close;
-    OboeStream_requestStart;
-    OboeStream_requestPause;
-    OboeStream_requestFlush;
-    OboeStream_requestStop;
-    OboeStream_getState;
-    OboeStream_waitForStateChange;
-    OboeStream_read;
-    OboeStream_write;
-    OboeStream_createThread;
-    OboeStream_joinThread;
-    OboeStream_setBufferSize;
-    OboeStream_getBufferSize;
-    OboeStream_getFramesPerBurst;
-    OboeStream_getBufferCapacity;
-    OboeStream_getXRunCount;
-    OboeStream_getSampleRate;
-    OboeStream_getSamplesPerFrame;
-    OboeStream_getFormat;
-    OboeStream_getSharingMode;
-    OboeStream_getDirection;
-    OboeStream_getFramesWritten;
-    OboeStream_getFramesRead;
-    OboeStream_getTimestamp;
+    AAudio_getNanoseconds;
+    AAudio_convertResultToText;
+    AAudio_convertStreamStateToText;
+    AAudio_createStreamBuilder;
+    AAudioStreamBuilder_setDeviceId;
+    AAudioStreamBuilder_setSampleRate;
+    AAudioStreamBuilder_getSampleRate;
+    AAudioStreamBuilder_setSamplesPerFrame;
+    AAudioStreamBuilder_getSamplesPerFrame;
+    AAudioStreamBuilder_setFormat;
+    AAudioStreamBuilder_getFormat;
+    AAudioStreamBuilder_setSharingMode;
+    AAudioStreamBuilder_getSharingMode;
+    AAudioStreamBuilder_setDirection;
+    AAudioStreamBuilder_getDirection;
+    AAudioStreamBuilder_openStream;
+    AAudioStreamBuilder_delete;
+    AAudioStream_close;
+    AAudioStream_requestStart;
+    AAudioStream_requestPause;
+    AAudioStream_requestFlush;
+    AAudioStream_requestStop;
+    AAudioStream_getState;
+    AAudioStream_waitForStateChange;
+    AAudioStream_read;
+    AAudioStream_write;
+    AAudioStream_createThread;
+    AAudioStream_joinThread;
+    AAudioStream_setBufferSize;
+    AAudioStream_getBufferSize;
+    AAudioStream_getFramesPerBurst;
+    AAudioStream_getBufferCapacity;
+    AAudioStream_getXRunCount;
+    AAudioStream_getSampleRate;
+    AAudioStream_getSamplesPerFrame;
+    AAudioStream_getFormat;
+    AAudioStream_getSharingMode;
+    AAudioStream_getDirection;
+    AAudioStream_getFramesWritten;
+    AAudioStream_getFramesRead;
+    AAudioStream_getTimestamp;
   local:
     *;
 };
diff --git a/media/liboboe/scripts/convert_oboe_aaudio.sh b/media/liboboe/scripts/convert_oboe_aaudio.sh
new file mode 100755
index 0000000..2bf025a
--- /dev/null
+++ b/media/liboboe/scripts/convert_oboe_aaudio.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Use SED to convert the Oboe API to the AAudio API
+
+echo "Convert Oboe names to AAudio names"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
+echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+    path=$1
+    pattern=$2
+    find $path -type f  -name $pattern -exec sed -i -f ${LIBOBOE_DIR}/scripts/oboe_to_aaudio.sed {} \;
+}
+
+function convertPath {
+    path=$1
+    convertPathPattern $1 '*.cpp'
+    convertPathPattern $1 '*.h'
+    # the mk match does not work!
+    convertPathPattern $1 '*.mk'
+    convertPathPattern $1 '*.md'
+    convertPathPattern $1 '*.bp'
+}
+
+#convertPath ${LIBOBOE_DIR}/examples
+#convertPath ${LIBOBOE_DIR}/include
+#convertPath ${LIBOBOE_DIR}/src
+#convertPath ${LIBOBOE_DIR}/tests
+convertPath ${LIBOBOE_DIR}
+convertPathPattern ${LIBOBOE_DIR} Android.mk
+convertPathPattern ${LIBOBOE_DIR} liboboe.map.txt
+
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOESERVICE_DIR} Android.mk
+
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
+mv ${LIBOBOE_DIR}/include/oboe ${LIBOBOE_DIR}/include/aaudio
+mv ${LIBOBOE_DIR}/include/aaudio/OboeAudio.h ${LIBOBOE_DIR}/include/aaudio/AAudio.h
+mv ${OBOESERVICE_DIR}/OboeService.h ${OBOESERVICE_DIR}/AAudioServiceDefinitions.h
+mv ${LIBOBOE_DIR}/tests/test_oboe_api.cpp ${LIBOBOE_DIR}/tests/test_aaudio_api.cpp
+
+# Rename files with Oboe in the name.
+find -name "*OboeAudioService*.cpp"      | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*OboeAudioService*.h"      | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*Oboe*.cpp"      | rename -v "s/Oboe/AAudio/g"
+find -name "*Oboe*.h"        | rename -v "s/Oboe/AAudio/g"
diff --git a/media/liboboe/scripts/oboe_to_aaudio.sed b/media/liboboe/scripts/oboe_to_aaudio.sed
new file mode 100644
index 0000000..7da85a0
--- /dev/null
+++ b/media/liboboe/scripts/oboe_to_aaudio.sed
@@ -0,0 +1,16 @@
+s/liboboe/libclarinet/g
+s/oboeservice/clarinetservice/g
+
+s/OboeAudio\.h/AAudio\.h/g
+s/OboeService\.h/AAudioServiceDefinitions\.h/g
+s/OboeAudioService/AAudioService/g
+s/LOG_TAG "OboeAudio"/LOG_TAG "AAudio"/g
+s/OBOE_AUDIO_FORMAT/AAUDIO_FORMAT/g
+s/OBOEAUDIO/AAUDIO/g
+
+s/oboe/aaudio/g
+s/Oboe/AAudio/g
+s/OBOE/AAUDIO/g
+
+s/libclarinet/liboboe/g
+s/clarinetservice/oboeservice/g
diff --git a/media/liboboe/scripts/revert_all_aaudio.sh b/media/liboboe/scripts/revert_all_aaudio.sh
new file mode 100755
index 0000000..de3fa7a
--- /dev/null
+++ b/media/liboboe/scripts/revert_all_aaudio.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+echo "Revert Oboe names to AAudio names"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
+echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+git checkout -- ${LIBOBOE_DIR}/examples
+git checkout -- ${LIBOBOE_DIR}/include
+git checkout -- ${LIBOBOE_DIR}/src
+git checkout -- ${LIBOBOE_DIR}/tests
+git checkout -- ${LIBOBOE_DIR}/Android.bp
+git checkout -- ${LIBOBOE_DIR}/README.md
+git checkout -- ${LIBOBOE_DIR}/liboboe.map.txt
+git checkout -- ${OBOESERVICE_DIR}
+git checkout -- ${OBOETEST_DIR}
+
+rm -rf ${LIBOBOE_DIR}/include/aaudio
+
+find . -name "*aaudio*.cpp" -print -delete
+find . -name "*AAudio*.cpp" -print -delete
+find . -name "*AAudio*.h"   -print -delete
diff --git a/media/liboboe/src/Android.mk b/media/liboboe/src/Android.mk
index 7b9a906..a508be3 100644
--- a/media/liboboe/src/Android.mk
+++ b/media/liboboe/src/Android.mk
@@ -1,37 +1,58 @@
 LOCAL_PATH:= $(call my-dir)
 
 # ======================= STATIC LIBRARY ==========================
-# This is being built because it make Oboe testing very easy with a complete executable.
+# This is being built because it makes AAudio testing very easy with a complete executable.
 # TODO Remove this target later, when not needed.
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := liboboe
 LOCAL_MODULE_TAGS := optional
 
+LIBAAUDIO_DIR := $(TOP)/frameworks/av/media/liboboe
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
+
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/native/include \
     system/core/base/include \
     frameworks/native/media/liboboe/include/include \
     frameworks/av/media/liboboe/include \
+    frameworks/native/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
     $(LOCAL_PATH)/core \
-    $(LOCAL_PATH)/utility \
-    $(LOCAL_PATH)/legacy
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
 
-LOCAL_SRC_FILES += core/AudioStream.cpp
-LOCAL_SRC_FILES += core/AudioStreamBuilder.cpp
-LOCAL_SRC_FILES += core/OboeAudio.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamRecord.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamTrack.cpp
-LOCAL_SRC_FILES += utility/HandleTracker.cpp
-LOCAL_SRC_FILES += utility/OboeUtilities.cpp
+LOCAL_SRC_FILES = \
+    core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/AAudioAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/AAudioUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/AAudioStreamRequest.cpp \
+    binding/AAudioStreamConfiguration.cpp \
+    binding/IAAudioService.cpp
 
-LOCAL_CFLAGS += -Wno-unused-parameter
-LOCAL_CFLAGS += -Wall -Werror
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
 # By default, all symbols are hidden.
-LOCAL_CFLAGS += -fvisibility=hidden
-# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+# LOCAL_CFLAGS += -fvisibility=hidden
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -47,24 +68,41 @@
     system/core/base/include \
     frameworks/native/media/liboboe/include/include \
     frameworks/av/media/liboboe/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
     $(LOCAL_PATH)/core \
-    $(LOCAL_PATH)/utility \
-    $(LOCAL_PATH)/legacy
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
 
-LOCAL_SRC_FILES += core/AudioStream.cpp
-LOCAL_SRC_FILES += core/AudioStreamBuilder.cpp
-LOCAL_SRC_FILES += core/OboeAudio.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamRecord.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamTrack.cpp
-LOCAL_SRC_FILES += utility/HandleTracker.cpp
-LOCAL_SRC_FILES += utility/OboeUtilities.cpp
+LOCAL_SRC_FILES = core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/AAudioAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/AAudioUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/AAudioStreamRequest.cpp \
+    binding/AAudioStreamConfiguration.cpp \
+    binding/IAAudioService.cpp
 
-LOCAL_CFLAGS += -Wno-unused-parameter
-LOCAL_CFLAGS += -Wall -Werror
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
 # By default, all symbols are hidden.
-LOCAL_CFLAGS += -fvisibility=hidden
-# OBOE_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
+# LOCAL_CFLAGS += -fvisibility=hidden
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
 
-LOCAL_SHARED_LIBRARIES := libaudioclient liblog libutils
+LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder
+
 include $(BUILD_SHARED_LIBRARY)
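A brief illustration (not in the patch) of the export pattern the AAUDIO_API define above sets up: once -fvisibility=hidden is re-enabled, only declarations tagged with the macro remain visible to users of the shared library. The function names here are hypothetical.

// Hedged sketch of how AAUDIO_API is expected to be used in the public headers.
#ifndef AAUDIO_API
#define AAUDIO_API __attribute__((visibility("default")))  // normally injected via LOCAL_CFLAGS
#endif

AAUDIO_API int32_t AAudio_exportedEntryPoint(void);  // hypothetical; stays visible
int32_t AAudioInternal_helper(void);                 // hypothetical; hidden once -fvisibility=hidden returns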
diff --git a/media/liboboe/src/binding/AAudioServiceDefinitions.h b/media/liboboe/src/binding/AAudioServiceDefinitions.h
new file mode 100644
index 0000000..ca637ef
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioServiceDefinitions.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AAUDIOSERVICEDEFINITIONS_H
+#define BINDING_AAUDIOSERVICEDEFINITIONS_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <aaudio/AAudio.h>
+
+using android::NO_ERROR;
+using android::IBinder;
+
+namespace android {
+
+enum aaudio_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD
+};
+
+} // namespace android
+
+namespace aaudio {
+
+enum aaudio_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD
+};
+
+// TODO Expand this to include all the open parameters.
+typedef struct AAudioServiceStreamInfo_s {
+    int32_t             deviceId;
+    int32_t             samplesPerFrame;  // number of channels
+    aaudio_sample_rate_t  sampleRate;
+    aaudio_audio_format_t audioFormat;
+} AAudioServiceStreamInfo;
+
+// This must be a fixed width so it can be in shared memory.
+enum RingbufferFlags : uint32_t {
+    NONE = 0,
+    RATE_ISOCHRONOUS = 0x0001,
+    RATE_ASYNCHRONOUS = 0x0002,
+    COHERENCY_DMA = 0x0004,
+    COHERENCY_ACQUIRE_RELEASE = 0x0008,
+    COHERENCY_AUTO = 0x0010,
+};
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct RingBufferDescriptor_s {
+    uint8_t* dataAddress;       // offset from read or write block
+    int64_t* writeCounterAddress;
+    int64_t* readCounterAddress;
+    int32_t  bytesPerFrame;     // index is in frames
+    int32_t  framesPerBurst;    // for ISOCHRONOUS queues
+    int32_t  capacityInFrames;  // zero if unused
+    RingbufferFlags flags;
+} RingBufferDescriptor;
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct EndpointDescriptor_s {
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferDescriptor upMessageQueueDescriptor;   // server to client
+    RingBufferDescriptor downMessageQueueDescriptor; // client to server
+    RingBufferDescriptor upDataQueueDescriptor;      // eg. record
+    RingBufferDescriptor downDataQueueDescriptor;    // eg. playback
+} EndpointDescriptor;
+
+} // namespace aaudio
+
+#endif //BINDING_AAUDIOSERVICEDEFINITIONS_H
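A hedged reading of the descriptor above, not taken from the patch: if the write and read counter addresses point at free-running frame counts shared with the service (the patch does not state this explicitly), the fill level of a queue could be derived as follows.

#include <stdint.h>
#include "binding/AAudioServiceDefinitions.h"  // assumed include path within the library

// Hedged sketch: derive how many frames are readable from a RingBufferDescriptor.
static int32_t framesAvailableToRead(const aaudio::RingBufferDescriptor *rb) {
    int64_t written = *rb->writeCounterAddress;   // real code would need atomic/ordered reads
    int64_t read    = *rb->readCounterAddress;
    int64_t filled  = written - read;
    if (filled < 0) filled = 0;
    if (rb->capacityInFrames > 0 && filled > rb->capacityInFrames) {
        filled = rb->capacityInFrames;            // clamp against racy or corrupt counters
    }
    return (int32_t) filled;
}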
diff --git a/media/liboboe/src/binding/AAudioServiceMessage.h b/media/liboboe/src/binding/AAudioServiceMessage.h
new file mode 100644
index 0000000..16cb5eb
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioServiceMessage.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+#define AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+// TODO move this to an "include" folder for the service.
+
+struct AAudioMessageTimestamp {
+    aaudio_position_frames_t position;
+    int64_t                deviceOffset; // add to client position to get device position
+    aaudio_nanoseconds_t     timestamp;
+};
+
+typedef enum aaudio_service_event_e : uint32_t {
+    AAUDIO_SERVICE_EVENT_STARTED,
+    AAUDIO_SERVICE_EVENT_PAUSED,
+    AAUDIO_SERVICE_EVENT_FLUSHED,
+    AAUDIO_SERVICE_EVENT_CLOSED,
+    AAUDIO_SERVICE_EVENT_DISCONNECTED
+} aaudio_service_event_t;
+
+struct AAudioMessageEvent {
+    aaudio_service_event_t event;
+    int32_t data1;
+    int64_t data2;
+};
+
+typedef struct AAudioServiceMessage_s {
+    enum class code : uint32_t {
+        NOTHING,
+        TIMESTAMP,
+        EVENT,
+    };
+
+    code what;
+    union {
+        AAudioMessageTimestamp timestamp;
+        AAudioMessageEvent event;
+    };
+} AAudioServiceMessage;
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_MESSAGE_H
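
(Editorial note, not part of the change.) A minimal sketch of how a message is filled on one side and dispatched on the other; the function name is made up.

#include "binding/AAudioServiceMessage.h"

// Sketch: build an EVENT message as the service might, then branch on the
// 'what' discriminant as the client would after popping it from the queue.
static void exampleMessageRoundTrip() {
    aaudio::AAudioServiceMessage message;
    message.what = aaudio::AAudioServiceMessage::code::EVENT;
    message.event.event = aaudio::AAUDIO_SERVICE_EVENT_STARTED;
    message.event.data1 = 0;
    message.event.data2 = 0;

    switch (message.what) {
    case aaudio::AAudioServiceMessage::code::TIMESTAMP:
        // message.timestamp is the active union member here
        break;
    case aaudio::AAudioServiceMessage::code::EVENT:
        // message.event is the active union member here
        break;
    default:
        break;
    }
}
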
diff --git a/media/liboboe/src/binding/AAudioStreamConfiguration.cpp b/media/liboboe/src/binding/AAudioStreamConfiguration.cpp
new file mode 100644
index 0000000..1cb2bfa
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioStreamConfiguration.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+AAudioStreamConfiguration::AAudioStreamConfiguration() {}
+AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
+
+status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mDeviceId);
+    parcel->writeInt32(mSampleRate);
+    parcel->writeInt32(mSamplesPerFrame);
+    parcel->writeInt32((int32_t) mAudioFormat);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&mDeviceId);
+    parcel->readInt32(&mSampleRate);
+    parcel->readInt32(&mSamplesPerFrame);
+    parcel->readInt32(&temp);
+    mAudioFormat = (aaudio_audio_format_t) temp;
+    return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t AAudioStreamConfiguration::validate() {
+    // Validate results of the open.
+    if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
+        ALOGE("AAudioStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+
+    if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
+        ALOGE("AAudioStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+
+    switch (mAudioFormat) {
+    case AAUDIO_FORMAT_PCM_I16:
+    case AAUDIO_FORMAT_PCM_FLOAT:
+    case AAUDIO_FORMAT_PCM_I8_24:
+    case AAUDIO_FORMAT_PCM_I32:
+        break;
+    default:
+        ALOGE("AAudioStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    return AAUDIO_OK;
+}
+
+void AAudioStreamConfiguration::dump() {
+    ALOGD("AAudioStreamConfiguration mSampleRate = %d -----", mSampleRate);
+    ALOGD("AAudioStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+    ALOGD("AAudioStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
+}
diff --git a/media/liboboe/src/binding/AAudioStreamConfiguration.h b/media/liboboe/src/binding/AAudioStreamConfiguration.h
new file mode 100644
index 0000000..ef21443
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioStreamConfiguration.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AAUDIO_STREAM_CONFIGURATION_H
+#define BINDING_AAUDIO_STREAM_CONFIGURATION_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <aaudio/AAudioDefinitions.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class AAudioStreamConfiguration : public Parcelable {
+public:
+    AAudioStreamConfiguration();
+    virtual ~AAudioStreamConfiguration();
+
+    aaudio_device_id_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    void setDeviceId(aaudio_device_id_t deviceId) {
+        mDeviceId = deviceId;
+    }
+
+    aaudio_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    void setSampleRate(aaudio_sample_rate_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    void setSamplesPerFrame(int32_t samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+    }
+
+    aaudio_audio_format_t getAudioFormat() const {
+        return mAudioFormat;
+    }
+
+    void setAudioFormat(aaudio_audio_format_t audioFormat) {
+        mAudioFormat = audioFormat;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t validate();
+
+    void dump();
+
+protected:
+    aaudio_device_id_t    mDeviceId        = AAUDIO_DEVICE_UNSPECIFIED;
+    aaudio_sample_rate_t  mSampleRate      = AAUDIO_UNSPECIFIED;
+    int32_t               mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_audio_format_t mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_AAUDIO_STREAM_CONFIGURATION_H
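
(Editorial note, not part of the change.) A small sketch of filling a configuration, validating it, and round-tripping it through a Parcel the way openStream() exchanges it; the function name and the chosen format values are illustration only.

#include <binder/Parcel.h>

#include "binding/AAudioStreamConfiguration.h"

// Sketch: request 48 kHz stereo 16-bit PCM and round-trip it through a Parcel.
static aaudio_result_t exampleConfigurationRoundTrip() {
    aaudio::AAudioStreamConfiguration config;
    config.setDeviceId(AAUDIO_DEVICE_UNSPECIFIED);  // let the service choose
    config.setSampleRate(48000);
    config.setSamplesPerFrame(2);                   // stereo
    config.setAudioFormat(AAUDIO_FORMAT_PCM_I16);

    aaudio_result_t result = config.validate();
    if (result != AAUDIO_OK) {
        return result;
    }

    android::Parcel parcel;
    config.writeToParcel(&parcel);
    parcel.setDataPosition(0);                      // rewind before reading back

    aaudio::AAudioStreamConfiguration copy;
    copy.readFromParcel(&parcel);
    return copy.validate();
}
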
diff --git a/media/liboboe/src/binding/AAudioStreamRequest.cpp b/media/liboboe/src/binding/AAudioStreamRequest.cpp
new file mode 100644
index 0000000..5202b73
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioStreamRequest.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AAudioStreamRequest.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+AAudioStreamRequest::AAudioStreamRequest()
+    : mConfiguration()
+    {}
+
+AAudioStreamRequest::~AAudioStreamRequest() {}
+
+status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32((int32_t) mUserId);
+    parcel->writeInt32((int32_t) mProcessId);
+    mConfiguration.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&temp);
+    mUserId = (uid_t) temp;
+    parcel->readInt32(&temp);
+    mProcessId = (pid_t) temp;
+    mConfiguration.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t AAudioStreamRequest::validate() {
+    return mConfiguration.validate();
+}
+
+void AAudioStreamRequest::dump() {
+    ALOGD("AAudioStreamRequest mUserId = %d -----", mUserId);
+    ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
+    mConfiguration.dump();
+}
diff --git a/media/liboboe/src/binding/AAudioStreamRequest.h b/media/liboboe/src/binding/AAudioStreamRequest.h
new file mode 100644
index 0000000..0fd28ba
--- /dev/null
+++ b/media/liboboe/src/binding/AAudioStreamRequest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AAUDIO_STREAM_REQUEST_H
+#define BINDING_AAUDIO_STREAM_REQUEST_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class AAudioStreamRequest : public Parcelable {
+public:
+    AAudioStreamRequest();
+    virtual ~AAudioStreamRequest();
+
+    uid_t getUserId() const {
+        return mUserId;
+    }
+
+    void setUserId(uid_t userId) {
+        mUserId = userId;
+    }
+
+    pid_t getProcessId() const {
+        return mProcessId;
+    }
+
+    void setProcessId(pid_t processId) {
+        mProcessId = processId;
+    }
+
+    AAudioStreamConfiguration &getConfiguration() {
+        return mConfiguration;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t validate();
+
+    void dump();
+
+protected:
+    AAudioStreamConfiguration  mConfiguration;
+    uid_t    mUserId;
+    pid_t    mProcessId;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_AAUDIO_STREAM_REQUEST_H
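
(Editorial note, not part of the change.) A short sketch of how a client might populate a request before calling openStream(); the function name and format values are illustration only.

#include <unistd.h>

#include "binding/AAudioStreamRequest.h"

// Sketch: identify the calling user/process and ask for 48 kHz stereo 16-bit PCM.
static aaudio::AAudioStreamRequest exampleBuildRequest() {
    aaudio::AAudioStreamRequest request;
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.getConfiguration().setSampleRate(48000);
    request.getConfiguration().setSamplesPerFrame(2);
    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_I16);
    return request;
}
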
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.cpp b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
new file mode 100644
index 0000000..f40ee02
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by AAudio clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+AudioEndpointParcelable::AudioEndpointParcelable() {}
+
+AudioEndpointParcelable::~AudioEndpointParcelable() {}
+
+/**
+ * Add the file descriptor to the table.
+ * @return index in table or negative error
+ */
+int32_t AudioEndpointParcelable::addFileDescriptor(int fd, int32_t sizeInBytes) {
+    if (mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    int32_t index = mNumSharedMemories++;
+    mSharedMemories[index].setup(fd, sizeInBytes);
+    return index;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].writeToParcel(parcel);
+    }
+    mUpMessageQueueParcelable.writeToParcel(parcel);
+    mDownMessageQueueParcelable.writeToParcel(parcel);
+    mUpDataQueueParcelable.writeToParcel(parcel);
+    mDownDataQueueParcelable.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].readFromParcel(parcel);
+    }
+    mUpMessageQueueParcelable.readFromParcel(parcel);
+    mDownMessageQueueParcelable.readFromParcel(parcel);
+    mUpDataQueueParcelable.readFromParcel(parcel);
+    mDownDataQueueParcelable.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
+    // TODO error check
+    mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
+    mDownMessageQueueParcelable.resolve(mSharedMemories,
+                                        &descriptor->downMessageQueueDescriptor);
+    mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
+    mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AudioEndpointParcelable::validate() {
+    aaudio_result_t result;
+    if (mNumSharedMemories < 0 || mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+        ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        result = mSharedMemories[i].validate();
+        if (result != AAUDIO_OK) {
+            return result;
+        }
+    }
+    if ((result = mUpMessageQueueParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownMessageQueueParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mUpDataQueueParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownDataQueueParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
+        return result;
+    }
+    return AAUDIO_OK;
+}
+
+void AudioEndpointParcelable::dump() {
+    ALOGD("AudioEndpointParcelable ======================================= BEGIN");
+    ALOGD("AudioEndpointParcelable mNumSharedMemories = %d", mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].dump();
+    }
+    ALOGD("AudioEndpointParcelable mUpMessageQueueParcelable =========");
+    mUpMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownMessageQueueParcelable =======");
+    mDownMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mUpDataQueueParcelable ============");
+    mUpDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownDataQueueParcelable ==========");
+    mDownDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable ======================================= END");
+}
+
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.h b/media/liboboe/src/binding/AudioEndpointParcelable.h
new file mode 100644
index 0000000..d4646d0
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AUDIOENDPOINTPARCELABLE_H
+#define BINDING_AUDIOENDPOINTPARCELABLE_H
+
+#include <stdint.h>
+
+//#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by AAudio clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+class AudioEndpointParcelable : public Parcelable {
+public:
+    AudioEndpointParcelable();
+    virtual ~AudioEndpointParcelable();
+
+    /**
+     * Add the file descriptor to the table.
+     * @return index in table or negative error
+     */
+    int32_t addFileDescriptor(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t resolve(EndpointDescriptor *descriptor);
+
+    aaudio_result_t validate();
+
+    void dump();
+
+public: // TODO add getters
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferParcelable    mUpMessageQueueParcelable;   // server to client
+    RingBufferParcelable    mDownMessageQueueParcelable; // client to server
+    RingBufferParcelable    mUpDataQueueParcelable;      // eg. record, could share same queue
+    RingBufferParcelable    mDownDataQueueParcelable;    // eg. playback
+
+private:
+    int32_t                 mNumSharedMemories = 0;
+    SharedMemoryParcelable  mSharedMemories[MAX_SHARED_MEMORIES];
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_AUDIOENDPOINTPARCELABLE_H
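
(Editorial note, not part of the change.) A sketch of a service describing one shared-memory region with a free-running data queue inside it, then resolving the result into addresses as the client does. The fd parameter, sizes and offsets are made-up illustration values.

#include "binding/AudioEndpointParcelable.h"

// Sketch: register one shared-memory fd, describe a playback data queue that
// occupies it, then resolve the whole endpoint into usable pointers.
static aaudio_result_t exampleDescribeAndResolve(int sharedMemoryFd) {
    aaudio::AudioEndpointParcelable endpoint;

    const int32_t bytesPerFrame = 4;         // 16-bit stereo
    const int32_t sizeInBytes   = 8 * 1024;  // illustration only
    int32_t index = endpoint.addFileDescriptor(sharedMemoryFd, sizeInBytes);
    if (index < 0) {
        return index;  // AAUDIO_ERROR_OUT_OF_RANGE when the table is full
    }

    endpoint.mDownDataQueueParcelable.setupMemory(index, 0, sizeInBytes);
    endpoint.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
    endpoint.mDownDataQueueParcelable.setFramesPerBurst(192);
    endpoint.mDownDataQueueParcelable.setCapacityInFrames(sizeInBytes / bytesPerFrame);

    // Client side: turn offsets and fds back into addresses.
    aaudio::EndpointDescriptor descriptor;
    return endpoint.resolve(&descriptor);
}
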
diff --git a/media/liboboe/src/binding/IAAudioService.cpp b/media/liboboe/src/binding/IAAudioService.cpp
new file mode 100644
index 0000000..899ebc0
--- /dev/null
+++ b/media/liboboe/src/binding/IAAudioService.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "utility/AAudioUtilities.h"
+
+namespace android {
+
+/**
+ * This is used by the AAudio Client to talk to the AAudio Service.
+ *
+ * The order of parameters in the Parcels must match the code in AAudioService.cpp.
+ */
+class BpAAudioService : public BpInterface<IAAudioService>
+{
+public:
+    explicit BpAAudioService(const sp<IBinder>& impl)
+        : BpInterface<IAAudioService>(impl)
+    {
+    }
+
+    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configuration) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        request.writeToParcel(&data);
+        status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_handle_t stream;
+        reply.readInt32(&stream);
+        configuration.readFromParcel(&reply);
+        return stream;
+    }
+
+    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                               aaudio::AudioEndpointParcelable &parcelable) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        parcelable.readFromParcel(&reply);
+        parcelable.dump();
+        aaudio_result_t result = parcelable.validate();
+        if (result != AAUDIO_OK) {
+            return result;
+        }
+        reply.readInt32(&result);
+        return result;
+    }
+
+    // TODO should we wait for a reply?
+    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(START_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+                                              aaudio_nanoseconds_t periodNanoseconds)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        data.writeInt64(periodNanoseconds);
+        status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return AAudioConvert_androidToAAudioResult(err);
+        }
+        // parse reply
+        aaudio_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+};
+
+// Implement an interface to the service.
+// This is here so that you don't have to link with the liboboe static library.
+IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
+
+// The order of parameters in the Parcels must match the code in BpAAudioService above.
+
+status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
+                                        Parcel* reply, uint32_t flags) {
+    AAudioStream stream;
+    aaudio::AAudioStreamRequest request;
+    aaudio::AAudioStreamConfiguration configuration;
+    pid_t pid;
+    aaudio_nanoseconds_t nanoseconds;
+    aaudio_result_t result;
+    ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
+    data.checkInterface(this);
+
+    switch(code) {
+        case OPEN_STREAM: {
+            request.readFromParcel(&data);
+            stream = openStream(request, configuration);
+            ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+            reply->writeInt32(stream);
+            configuration.writeToParcel(reply);
+            return NO_ERROR;
+        } break;
+
+        case CLOSE_STREAM: {
+            data.readInt32(&stream);
+            ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+            result = closeStream(stream);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case GET_STREAM_DESCRIPTION: {
+            data.readInt32(&stream);
+            ALOGD("BnAAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+            aaudio::AudioEndpointParcelable parcelable;
+            result = getStreamDescription(stream, parcelable);
+            if (result != AAUDIO_OK) {
+                return AAudioConvert_aaudioToAndroidStatus(result);
+            }
+            parcelable.dump();
+            result = parcelable.validate();
+            if (result != AAUDIO_OK) {
+                return AAudioConvert_aaudioToAndroidStatus(result);
+            }
+            parcelable.writeToParcel(reply);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case START_STREAM: {
+            data.readInt32(&stream);
+            result = startStream(stream);
+            ALOGD("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case PAUSE_STREAM: {
+            data.readInt32(&stream);
+            result = pauseStream(stream);
+            ALOGD("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case FLUSH_STREAM: {
+            data.readInt32(&stream);
+            result = flushStream(stream);
+            ALOGD("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case REGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            data.readInt64(&nanoseconds);
+            result = registerAudioThread(stream, pid, nanoseconds);
+            ALOGD("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case UNREGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            result = unregisterAudioThread(stream, pid);
+            ALOGD("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        default:
+            // ALOGW("BnAAudioService::onTransact not handled %u", code);
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+} /* namespace android */
diff --git a/media/liboboe/src/binding/IAAudioService.h b/media/liboboe/src/binding/IAAudioService.h
new file mode 100644
index 0000000..7d2fd29
--- /dev/null
+++ b/media/liboboe/src/binding/IAAudioService.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_IAAUDIOSERVICE_H
+#define BINDING_IAAUDIOSERVICE_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+
+namespace android {
+
+// Interface (our AIDL) - Shared by server and client
+class IAAudioService : public IInterface {
+public:
+
+    DECLARE_META_INTERFACE(AAudioService);
+
+    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configuration) = 0;
+
+    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+
+    /** Get an immutable description of the in-memory queues
+     *  used to communicate with the underlying HAL or Service.
+     */
+    virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+                                               aaudio::AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     */
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle, pid_t clientThreadId,
+                                              aaudio_nanoseconds_t periodNanoseconds) = 0;
+
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                pid_t clientThreadId) = 0;
+};
+
+class BnAAudioService : public BnInterface<IAAudioService> {
+public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+
+};
+
+} /* namespace android */
+
+#endif //BINDING_IAAUDIOSERVICE_H
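
(Editorial note, not part of the change.) A client reaches this interface through the ServiceManager roughly as sketched below. The service name string is an assumption; whatever name the service actually registers under must be used.

#include <binder/IServiceManager.h>

#include "binding/IAAudioService.h"

// Sketch: look up the remote service and open a stream over Binder.
static aaudio_handle_t exampleOpenStream(aaudio::AAudioStreamRequest &request,
                                         aaudio::AAudioStreamConfiguration &configuration) {
    android::sp<android::IBinder> binder =
            android::defaultServiceManager()->getService(android::String16("AAudioService"));
    if (binder == nullptr) {
        return AAUDIO_ERROR_INTERNAL;  // service not registered (name is an assumption)
    }
    android::sp<android::IAAudioService> service =
            android::interface_cast<android::IAAudioService>(binder);
    // openStream() returns a positive stream handle on success or a negative
    // aaudio_result_t on failure, so check the sign before using the handle.
    return service->openStream(request, configuration);
}
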
diff --git a/media/liboboe/src/binding/RingBufferParcelable.cpp b/media/liboboe/src/binding/RingBufferParcelable.cpp
new file mode 100644
index 0000000..3a92929
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+#include "binding/RingBufferParcelable.h"
+
+using android::NO_ERROR;
+
+using namespace aaudio;
+
+RingBufferParcelable::RingBufferParcelable() {}
+RingBufferParcelable::~RingBufferParcelable() {}
+
+// TODO This assumes that all three use the same SharedMemoryParcelable
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes,
+                 int32_t readCounterOffset,
+                 int32_t writeCounterOffset,
+                 int32_t counterSizeBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+int32_t RingBufferParcelable::getBytesPerFrame() {
+    return mBytesPerFrame;
+}
+
+void RingBufferParcelable::setBytesPerFrame(int32_t bytesPerFrame) {
+    mBytesPerFrame = bytesPerFrame;
+}
+
+int32_t RingBufferParcelable::getFramesPerBurst() {
+    return mFramesPerBurst;
+}
+
+void RingBufferParcelable::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+}
+
+int32_t RingBufferParcelable::getCapacityInFrames() {
+    return mCapacityInFrames;
+}
+
+void RingBufferParcelable::setCapacityInFrames(int32_t capacityInFrames) {
+    mCapacityInFrames = capacityInFrames;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->writeInt32(mBytesPerFrame);
+        parcel->writeInt32(mFramesPerBurst);
+        parcel->writeInt32(mFlags);
+        mReadCounterParcelable.writeToParcel(parcel);
+        mWriteCounterParcelable.writeToParcel(parcel);
+        mDataParcelable.writeToParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->readInt32(&mBytesPerFrame);
+        parcel->readInt32(&mFramesPerBurst);
+        parcel->readInt32((int32_t *)&mFlags);
+        mReadCounterParcelable.readFromParcel(parcel);
+        mWriteCounterParcelable.readFromParcel(parcel);
+        mDataParcelable.readFromParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
+    aaudio_result_t result;
+
+    result = mReadCounterParcelable.resolve(memoryParcels,
+                                            (void **) &descriptor->readCounterAddress);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
+    result = mWriteCounterParcelable.resolve(memoryParcels,
+                                             (void **) &descriptor->writeCounterAddress);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
+    result = mDataParcelable.resolve(memoryParcels, (void **) &descriptor->dataAddress);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
+    descriptor->bytesPerFrame = mBytesPerFrame;
+    descriptor->framesPerBurst = mFramesPerBurst;
+    descriptor->capacityInFrames = mCapacityInFrames;
+    descriptor->flags = mFlags;
+    return AAUDIO_OK;
+}
+
+aaudio_result_t RingBufferParcelable::validate() {
+    aaudio_result_t result;
+    if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
+        ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
+        ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    if (mFramesPerBurst < 0 || mFramesPerBurst >= 1024) {
+        ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    if ((result = mReadCounterParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mWriteCounterParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDataParcelable.validate()) != AAUDIO_OK) {
+        ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
+        return result;
+    }
+    return AAUDIO_OK;
+}
+
+
+void RingBufferParcelable::dump() {
+    ALOGD("RingBufferParcelable mCapacityInFrames = %d ---------", mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        ALOGD("RingBufferParcelable mBytesPerFrame = %d", mBytesPerFrame);
+        ALOGD("RingBufferParcelable mFramesPerBurst = %d", mFramesPerBurst);
+        ALOGD("RingBufferParcelable mFlags = %u", mFlags);
+        mReadCounterParcelable.dump();
+        mWriteCounterParcelable.dump();
+        mDataParcelable.dump();
+    }
+}
diff --git a/media/liboboe/src/binding/RingBufferParcelable.h b/media/liboboe/src/binding/RingBufferParcelable.h
new file mode 100644
index 0000000..3f82c79
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_RINGBUFFER_PARCELABLE_H
+#define BINDING_RINGBUFFER_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+
+namespace aaudio {
+
+class RingBufferParcelable : public Parcelable {
+public:
+    RingBufferParcelable();
+    virtual ~RingBufferParcelable();
+
+    // TODO This assumes that all three use the same SharedMemoryParcelable
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes,
+                     int32_t readCounterOffset,
+                     int32_t writeCounterOffset,
+                     int32_t counterSizeBytes);
+
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes);
+
+    int32_t getBytesPerFrame();
+
+    void setBytesPerFrame(int32_t bytesPerFrame);
+
+    int32_t getFramesPerBurst();
+
+    void setFramesPerBurst(int32_t framesPerBurst);
+
+    int32_t getCapacityInFrames();
+
+    void setCapacityInFrames(int32_t capacityInFrames);
+
+    /**
+     * The read and write must be symmetric.
+     */
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
+
+    aaudio_result_t validate();
+
+    void dump();
+
+private:
+    SharedRegionParcelable  mReadCounterParcelable;
+    SharedRegionParcelable  mWriteCounterParcelable;
+    SharedRegionParcelable  mDataParcelable;
+    int32_t                 mBytesPerFrame = 0;     // index is in frames
+    int32_t                 mFramesPerBurst = 0;    // for ISOCHRONOUS queues
+    int32_t                 mCapacityInFrames = 0;  // zero if unused
+    RingbufferFlags         mFlags = RingbufferFlags::NONE;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_RINGBUFFER_PARCELABLE_H
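
(Editorial note, not part of the change.) A sketch of the six-argument setupMemory() overload: two 8-byte counters followed by the data region, all in the same shared-memory block. The offsets are computed inline; a real service presumably has its own allocator for this.

#include <stdint.h>

#include "binding/RingBufferParcelable.h"

// Sketch: describe a queue whose read counter, write counter and data area all
// live in the shared memory block at 'sharedMemoryIndex'.
static void exampleLayoutQueue(aaudio::RingBufferParcelable &queue,
                               int32_t sharedMemoryIndex,
                               int32_t bytesPerFrame,
                               int32_t capacityInFrames) {
    const int32_t counterSizeBytes   = (int32_t) sizeof(int64_t);
    const int32_t readCounterOffset  = 0;
    const int32_t writeCounterOffset = counterSizeBytes;
    const int32_t dataOffset         = 2 * counterSizeBytes;
    const int32_t dataSizeInBytes    = bytesPerFrame * capacityInFrames;

    queue.setupMemory(sharedMemoryIndex,
                      dataOffset, dataSizeInBytes,
                      readCounterOffset, writeCounterOffset,
                      counterSizeBytes);
    queue.setBytesPerFrame(bytesPerFrame);
    queue.setFramesPerBurst(1);
    queue.setCapacityInFrames(capacityInFrames);
}
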
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.cpp b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
new file mode 100644
index 0000000..277a992
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <aaudio/AAudioDefinitions.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+SharedMemoryParcelable::SharedMemoryParcelable() {}
+SharedMemoryParcelable::~SharedMemoryParcelable() {}
+
+void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
+    mFd = fd;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeDupFileDescriptor(mFd);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        mFd = dup(parcel->readFileDescriptor());
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+// TODO Add code to munmap()
+
+aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
+                                              void **regionAddressPtr) {
+    if (offsetInBytes < 0) {
+        ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    } else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
+        ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
+              "sizeInBytes = %d, mSizeInBytes = %d",
+              offsetInBytes, sizeInBytes, mSizeInBytes);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (mResolvedAddress == nullptr) {
+        mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
+                                          MAP_SHARED, mFd, 0);
+        if (mResolvedAddress == MAP_FAILED) {
+            ALOGE("SharedMemoryParcelable mmap failed for fd = %d", mFd);
+            return AAUDIO_ERROR_INTERNAL;
+        }
+    }
+    *regionAddressPtr = mResolvedAddress + offsetInBytes;
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+    ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+          offsetInBytes, *regionAddressPtr);
+    return AAUDIO_OK;
+}
+
+int32_t SharedMemoryParcelable::getSizeInBytes() {
+    return mSizeInBytes;
+}
+
+aaudio_result_t SharedMemoryParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mFd == -1) {
+            ALOGE("SharedMemoryParcelable uninitialized mFd = %d", mFd);
+            return AAUDIO_ERROR_INTERNAL;
+        }
+    }
+    return AAUDIO_OK;
+}
+
+void SharedMemoryParcelable::dump() {
+    ALOGD("SharedMemoryParcelable mFd = %d", mFd);
+    ALOGD("SharedMemoryParcelable mSizeInBytes = %d", mSizeInBytes);
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+}
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.h b/media/liboboe/src/binding/SharedMemoryParcelable.h
new file mode 100644
index 0000000..5768ea9
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDMEMORYPARCELABLE_H
+#define BINDING_SHAREDMEMORYPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <aaudio/AAudioDefinitions.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+// Arbitrary limits for sanity checks. TODO remove after debugging.
+#define MAX_SHARED_MEMORIES (32)
+#define MAX_MMAP_OFFSET (32 * 1024)
+#define MAX_MMAP_SIZE (32 * 1024)
+
+/**
+ * This is a parcelable description of a shared memory referenced by a file descriptor.
+ * It may be divided into several regions.
+ */
+class SharedMemoryParcelable : public Parcelable {
+public:
+    SharedMemoryParcelable();
+    virtual ~SharedMemoryParcelable();
+
+    void setup(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
+
+    int32_t getSizeInBytes();
+
+    aaudio_result_t validate();
+
+    void dump();
+
+protected:
+    int mFd = -1;
+    int32_t mSizeInBytes = 0;
+    uint8_t *mResolvedAddress = nullptr;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_SHAREDMEMORYPARCELABLE_H
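
(Editorial note, not part of the change.) On the service side the parcelable would typically be backed by an anonymous shared-memory region; a sketch using the AOSP ashmem helper follows. Any other mappable file descriptor would work the same way, and the region name is illustration only.

#include <cutils/ashmem.h>

#include <aaudio/AAudioDefinitions.h>
#include "binding/SharedMemoryParcelable.h"

// Sketch: create an ashmem region and hand its fd to the parcelable.
static aaudio_result_t exampleBackWithAshmem(aaudio::SharedMemoryParcelable &shared,
                                             int32_t sizeInBytes) {
    int fd = ashmem_create_region("AAudioSketch", sizeInBytes);
    if (fd < 0) {
        return AAUDIO_ERROR_INTERNAL;
    }
    shared.setup(fd, sizeInBytes);
    return shared.validate();  // enforces the MAX_MMAP_SIZE limit above
}
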
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.cpp b/media/liboboe/src/binding/SharedRegionParcelable.cpp
new file mode 100644
index 0000000..a3e0111
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+#include "binding/SharedRegionParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+SharedRegionParcelable::SharedRegionParcelable() {}
+SharedRegionParcelable::~SharedRegionParcelable() {}
+
+void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
+                                   int32_t offsetInBytes,
+                                   int32_t sizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
+    mOffsetInBytes = offsetInBytes;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeInt32(mSharedMemoryIndex);
+        parcel->writeInt32(mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->readInt32(&mSharedMemoryIndex);
+        parcel->readInt32(&mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
+                                              void **regionAddressPtr) {
+    if (mSizeInBytes == 0) {
+        *regionAddressPtr = nullptr;
+        return AAUDIO_OK;
+    }
+    if (mSharedMemoryIndex < 0) {
+        ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
+    return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
+}
+
+aaudio_result_t SharedRegionParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET) {
+            ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
+            return AAUDIO_ERROR_INTERNAL;
+        }
+        if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
+            ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+            return AAUDIO_ERROR_INTERNAL;
+        }
+    }
+    return AAUDIO_OK;
+}
+
+void SharedRegionParcelable::dump() {
+    ALOGD("SharedRegionParcelable mSizeInBytes = %d -----", mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        ALOGD("SharedRegionParcelable mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        ALOGD("SharedRegionParcelable mOffsetInBytes = %d", mOffsetInBytes);
+    }
+}
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.h b/media/liboboe/src/binding/SharedRegionParcelable.h
new file mode 100644
index 0000000..d6c2281
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDREGIONPARCELABLE_H
+#define BINDING_SHAREDREGIONPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class SharedRegionParcelable : public Parcelable {
+public:
+    SharedRegionParcelable();
+    virtual ~SharedRegionParcelable();
+
+    void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
+
+    aaudio_result_t validate();
+
+    void dump();
+
+protected:
+    int32_t mSharedMemoryIndex = -1;
+    int32_t mOffsetInBytes     = 0;
+    int32_t mSizeInBytes       = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //BINDING_SHAREDREGIONPARCELABLE_H
diff --git a/media/liboboe/src/client/AudioEndpoint.cpp b/media/liboboe/src/client/AudioEndpoint.cpp
new file mode 100644
index 0000000..5cd9782
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cassert>
+#include <aaudio/AAudioDefinitions.h>
+
+#include "AudioEndpointParcelable.h"
+#include "AudioEndpoint.h"
+#include "AAudioServiceMessage.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioEndpoint::AudioEndpoint()
+    : mOutputFreeRunning(false)
+    , mDataReadCounter(0)
+    , mDataWriteCounter(0)
+{
+}
+
+AudioEndpoint::~AudioEndpoint()
+{
+}
+
+static void AudioEndpoint_validateQueueDescriptor(const char *type,
+                                                  const RingBufferDescriptor *descriptor) {
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+          type,
+          descriptor->dataAddress);
+    ALOGD("AudioEndpoint_validateQueueDescriptor  readCounter at %p, writeCounter at %p",
+          descriptor->readCounterAddress,
+          descriptor->writeCounterAddress);
+
+    // Try to READ from the data area.
+    uint8_t value = descriptor->dataAddress[0];
+    ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+        (int) value);
+    // Try to WRITE to the data area.
+    descriptor->dataAddress[0] = value;
+    ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+
+    if (descriptor->readCounterAddress) {
+        fifo_counter_t counter = *descriptor->readCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->readCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+    }
+    if (descriptor->writeCounterAddress) {
+        fifo_counter_t counter = *descriptor->writeCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->writeCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+    }
+}
+
+void AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    AudioEndpoint_validateQueueDescriptor("msg", &pEndpointDescriptor->upMessageQueueDescriptor);
+    AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
+}
+
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+{
+    aaudio_result_t result = AAUDIO_OK;
+    AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
+
+    const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
+    assert(descriptor->bytesPerFrame == sizeof(AAudioServiceMessage));
+    assert(descriptor->readCounterAddress != nullptr);
+    assert(descriptor->writeCounterAddress != nullptr);
+    mUpCommandQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            descriptor->readCounterAddress,
+            descriptor->writeCounterAddress,
+            descriptor->dataAddress
+    );
+    /* TODO mDownCommandQueue
+    if (descriptor->capacityInFrames > 0) {
+        descriptor = &pEndpointDescriptor->downMessageQueueDescriptor;
+        mDownCommandQueue = new FifoBuffer(
+                descriptor->capacityInFrames,
+                descriptor->bytesPerFrame,
+                descriptor->readCounterAddress,
+                descriptor->writeCounterAddress,
+                descriptor->dataAddress
+        );
+    }
+     */
+    descriptor = &pEndpointDescriptor->downDataQueueDescriptor;
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->bytesPerFrame < 4 * 16); // FIXME just for initial debugging
+    assert(descriptor->framesPerBurst > 0);
+    assert(descriptor->framesPerBurst < 8 * 1024); // FIXME just for initial debugging
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    ALOGD("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+    mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
+    ALOGD("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+                                  ? &mDataReadCounter
+                                  : descriptor->readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  ? &mDataWriteCounter
+                                  : descriptor->writeCounterAddress;
+    mDownDataQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            readCounterAddress,
+            writeCounterAddress,
+            descriptor->dataAddress
+    );
+    uint32_t threshold = descriptor->capacityInFrames / 2;
+    mDownDataQueue->setThreshold(threshold);
+    return result;
+}
+
+aaudio_result_t AudioEndpoint::readUpCommand(AAudioServiceMessage *commandPtr)
+{
+    return mUpCommandQueue->read(commandPtr, 1);
+}
+
+aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
+{
+    return mDownDataQueue->write(buffer, numFrames);
+}
+
+void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setReadCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataReadCounter()
+{
+    return mDownDataQueue->getReadCounter();
+}
+
+void AudioEndpoint::setDownDataWriteCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataWriteCounter()
+{
+    return mDownDataQueue->getWriteCounter();
+}
+
+aaudio_size_frames_t AudioEndpoint::setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
+                                            aaudio_size_frames_t *actualFrames)
+{
+    if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
+        requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
+    }
+    mDownDataQueue->setThreshold(requestedFrames);
+    *actualFrames = mDownDataQueue->getThreshold();
+    return AAUDIO_OK;
+}
+
+int32_t AudioEndpoint::getBufferSizeInFrames() const
+{
+    return mDownDataQueue->getThreshold();
+}
+
+int32_t AudioEndpoint::getBufferCapacityInFrames() const
+{
+    return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
+}
+
+int32_t AudioEndpoint::getFullFramesAvailable()
+{
+    return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+}
diff --git a/media/liboboe/src/client/AudioEndpoint.h b/media/liboboe/src/client/AudioEndpoint.h
new file mode 100644
index 0000000..e786513
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIO_ENDPOINT_H
+#define AAUDIO_AUDIO_ENDPOINT_H
+
+#include <aaudio/AAudio.h>
+
+#include "AAudioServiceMessage.h"
+#include "AudioEndpointParcelable.h"
+#include "fifo/FifoBuffer.h"
+
+namespace aaudio {
+
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN   64
+
+/**
+ * A sink for audio.
+ * Used by the client code.
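+ *
+ * Typical sequence (as used by AudioStreamInternal): call configure() with the
+ * EndpointDescriptor received from the service, then poll readUpCommand() for
+ * service messages and push audio with the non-blocking writeDataNow().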
+ */
+class AudioEndpoint {
+
+public:
+    AudioEndpoint();
+    virtual ~AudioEndpoint();
+
+    /**
+     * Configure based on the EndpointDescriptor.
+     */
+    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+
+    /**
+     * Read from a command passed up from the Server.
+     * @return 1 if command received, 0 for no command, or negative error.
+     */
+    aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
+
+    /**
+     * Non-blocking write.
+     * @return framesWritten or a negative error code.
+     */
+    aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
+
+    /**
+     * Set the read index in the downData queue.
+     * This is needed if the reader is not updating the index itself.
+     */
+    void setDownDataReadCounter(fifo_counter_t framesRead);
+    fifo_counter_t getDownDataReadCounter();
+
+    void setDownDataWriteCounter(fifo_counter_t framesWritten);
+    fifo_counter_t getDownDataWriteCounter();
+
+    /**
+     * The result is not valid until after configure() is called.
+     *
+     * @return true if the output buffer read position is not updated, e.g. DMA
+     */
+    bool isOutputFreeRunning() const { return mOutputFreeRunning; }
+
+    int32_t setBufferSizeInFrames(aaudio_size_frames_t requestedFrames,
+                                  aaudio_size_frames_t *actualFrames);
+    aaudio_size_frames_t getBufferSizeInFrames() const;
+
+    aaudio_size_frames_t getBufferCapacityInFrames() const;
+
+    aaudio_size_frames_t getFullFramesAvailable();
+
+private:
+    FifoBuffer   * mUpCommandQueue = nullptr;
+    FifoBuffer   * mDownDataQueue = nullptr;
+    bool           mOutputFreeRunning;
+    fifo_counter_t mDataReadCounter; // only used if free-running
+    fifo_counter_t mDataWriteCounter; // only used if free-running
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AUDIO_ENDPOINT_H
diff --git a/media/liboboe/src/client/AudioStreamInternal.cpp b/media/liboboe/src/client/AudioStreamInternal.cpp
new file mode 100644
index 0000000..8d7e93f
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.cpp
@@ -0,0 +1,563 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <binder/IServiceManager.h>
+#include <utils/Mutex.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+
+#include "AudioStreamInternal.h"
+
+#define LOG_TIMESTAMPS   0
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+using android::Mutex;
+
+using namespace aaudio;
+
+static android::Mutex gServiceLock;
+static sp<IAAudioService>  gAAudioService;
+
+#define AAUDIO_SERVICE_NAME   "AAudioService"
+
+// Helper function to get access to the "AAudioService" service.
+// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
+static const sp<IAAudioService> getAAudioService() {
+    sp<IBinder> binder;
+    Mutex::Autolock _l(gServiceLock);
+    if (gAAudioService == 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        // Try several times to get the service.
+        int retries = 4;
+        do {
+            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
+            if (binder != 0) {
+                break;
+            }
+        } while (retries-- > 0);
+
+        if (binder != 0) {
+            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
+            // TODO Create a DeathRecipient that disconnects all active streams.
+            gAAudioService = interface_cast<IAAudioService>(binder);
+        } else {
+            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
+        }
+    }
+    return gAAudioService;
+}
+
+AudioStreamInternal::AudioStreamInternal()
+        : AudioStream()
+        , mClockModel()
+        , mAudioEndpoint()
+        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
+        , mFramesPerBurst(16)
+{
+}
+
+AudioStreamInternal::~AudioStreamInternal() {
+}
+
+aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
+
+    const sp<IAAudioService>& service = getAAudioService();
+    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+
+    aaudio_result_t result = AAUDIO_OK;
+    AAudioStreamRequest request;
+    AAudioStreamConfiguration configuration;
+
+    result = AudioStream::open(builder);
+    if (result < 0) {
+        return result;
+    }
+
+    // Build the request to send to the server.
+    request.setUserId(getuid());
+    request.setProcessId(getpid());
+    request.getConfiguration().setDeviceId(getDeviceId());
+    request.getConfiguration().setSampleRate(getSampleRate());
+    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+    request.getConfiguration().setAudioFormat(getFormat());
+    request.dump();
+
+    mServiceStreamHandle = service->openStream(request, configuration);
+    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+         (unsigned int)mServiceStreamHandle);
+    if (mServiceStreamHandle < 0) {
+        result = mServiceStreamHandle;
+        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream aaudio_result_t = 0x%08X", result);
+    } else {
+        result = configuration.validate();
+        if (result != AAUDIO_OK) {
+            close();
+            return result;
+        }
+        // Save results of the open.
+        setSampleRate(configuration.getSampleRate());
+        setSamplesPerFrame(configuration.getSamplesPerFrame());
+        setFormat(configuration.getAudioFormat());
+
+        aaudio::AudioEndpointParcelable parcelable;
+        result = service->getStreamDescription(mServiceStreamHandle, parcelable);
+        if (result != AAUDIO_OK) {
+            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
+            service->closeStream(mServiceStreamHandle);
+            return result;
+        }
+        // resolve parcelable into a descriptor
+        parcelable.resolve(&mEndpointDescriptor);
+
+        // Configure endpoint based on descriptor.
+        mAudioEndpoint.configure(&mEndpointDescriptor);
+
+        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+        assert(mFramesPerBurst >= 16);
+        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+
+        mClockModel.setSampleRate(getSampleRate());
+        mClockModel.setFramesPerBurst(mFramesPerBurst);
+
+        setState(AAUDIO_STREAM_STATE_OPEN);
+    }
+    return result;
+}
+
+aaudio_result_t AudioStreamInternal::close() {
+    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
+        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
+        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
+        const sp<IAAudioService>& aaudioService = getAAudioService();
+        if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+        aaudioService->closeStream(serviceStreamHandle);
+        return AAUDIO_OK;
+    } else {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+}
+
+aaudio_result_t AudioStreamInternal::requestStart()
+{
+    aaudio_nanoseconds_t startTime;
+    ALOGD("AudioStreamInternal(): start()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    const sp<IAAudioService>& aaudioService = getAAudioService();
+    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    startTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+    mClockModel.start(startTime);
+    processTimestamp(0, startTime);
+    setState(AAUDIO_STREAM_STATE_STARTING);
+    return aaudioService->startStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+    ALOGD("AudioStreamInternal(): pause()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    const sp<IAAudioService>& aaudioService = getAAudioService();
+    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    mClockModel.stop(AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC));
+    setState(AAUDIO_STREAM_STATE_PAUSING);
+    return aaudioService->pauseStream(mServiceStreamHandle);
+}
+
+aaudio_result_t AudioStreamInternal::requestFlush() {
+    ALOGD("AudioStreamInternal(): flush()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    const sp<IAAudioService>& aaudioService = getAAudioService();
+    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
+    return aaudioService->flushStream(mServiceStreamHandle);
+}
+
+void AudioStreamInternal::onFlushFromServer() {
+    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    aaudio_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+    aaudio_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+    // Bump offset so caller does not see the retrograde motion in getFramesRead().
+    aaudio_position_frames_t framesFlushed = writeCounter - readCounter;
+    mFramesOffsetFromService += framesFlushed;
+    // Flush written frames by forcing writeCounter to readCounter.
+    // This is because we cannot move the read counter in the hardware.
+    mAudioEndpoint.setDownDataWriteCounter(readCounter);
+}
+
+aaudio_result_t AudioStreamInternal::requestStop()
+{
+    // TODO better implementation of requestStop()
+    aaudio_result_t result = requestPause();
+    if (result == AAUDIO_OK) {
+        aaudio_stream_state_t state;
+        result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
+                                    &state,
+                                    500 * AAUDIO_NANOS_PER_MILLISECOND);// TODO temporary code
+        if (result == AAUDIO_OK) {
+            result = requestFlush();
+        }
+    }
+    return result;
+}
+
+aaudio_result_t AudioStreamInternal::registerThread() {
+    ALOGD("AudioStreamInternal(): registerThread()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    const sp<IAAudioService>& aaudioService = getAAudioService();
+    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return aaudioService->registerAudioThread(mServiceStreamHandle,
+                                         gettid(),
+                                         getPeriodNanoseconds());
+}
+
+aaudio_result_t AudioStreamInternal::unregisterThread() {
+    ALOGD("AudioStreamInternal(): unregisterThread()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    const sp<IAAudioService>& aaudioService = getAAudioService();
+    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+    return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
+}
+
+// TODO use aaudio_clockid_t all the way down to AudioClock
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+                           aaudio_position_frames_t *framePosition,
+                           aaudio_nanoseconds_t *timeNanoseconds) {
+// TODO implement using real HAL
+    aaudio_nanoseconds_t time = AudioClock::getNanoseconds();
+    *framePosition = mClockModel.convertTimeToPosition(time);
+    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamInternal::updateState() {
+    return processCommands();
+}
+
+#if LOG_TIMESTAMPS
+static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
+    static int64_t oldPosition = 0;
+    static aaudio_nanoseconds_t oldTime = 0;
+    int64_t framePosition = command.timestamp.position;
+    aaudio_nanoseconds_t nanoTime = command.timestamp.timestamp;
+    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+         (long long) framePosition,
+         (long long) nanoTime);
+    int64_t nanosDelta = nanoTime - oldTime;
+    if (nanosDelta > 0 && oldTime > 0) {
+        int64_t framesDelta = framePosition - oldPosition;
+        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
+        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+    }
+    oldPosition = framePosition;
+    oldTime = nanoTime;
+}
+#endif
+
+aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
+    aaudio_position_frames_t framePosition = 0;
+#if LOG_TIMESTAMPS
+    AudioStreamInternal_LogTimestamp(*message);
+#endif
+    framePosition = message->timestamp.position;
+    processTimestamp(framePosition, message->timestamp.timestamp);
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
+    aaudio_result_t result = AAUDIO_OK;
+    ALOGD("processCommands() got event %d", message->event.event);
+    switch (message->event.event) {
+        case AAUDIO_SERVICE_EVENT_STARTED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            setState(AAUDIO_STREAM_STATE_STARTED);
+            break;
+        case AAUDIO_SERVICE_EVENT_PAUSED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            setState(AAUDIO_STREAM_STATE_PAUSED);
+            break;
+        case AAUDIO_SERVICE_EVENT_FLUSHED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            setState(AAUDIO_STREAM_STATE_FLUSHED);
+            onFlushFromServer();
+            break;
+        case AAUDIO_SERVICE_EVENT_CLOSED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            setState(AAUDIO_STREAM_STATE_CLOSED);
+            break;
+        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
+            result = AAUDIO_ERROR_DISCONNECTED;
+            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+            break;
+        default:
+            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+                 (int) message->event.event);
+            break;
+    }
+    return result;
+}
+
+// Process all the commands coming from the server.
+aaudio_result_t AudioStreamInternal::processCommands() {
+    aaudio_result_t result = AAUDIO_OK;
+
+    while (result == AAUDIO_OK) {
+        AAudioServiceMessage message;
+        if (mAudioEndpoint.readUpCommand(&message) != 1) {
+            break; // no command this time, no problem
+        }
+        switch (message.what) {
+        case AAudioServiceMessage::code::TIMESTAMP:
+            result = onTimestampFromServer(&message);
+            break;
+
+        case AAudioServiceMessage::code::EVENT:
+            result = onEventFromServer(&message);
+            break;
+
+        default:
+            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+                 (int) message.what);
+            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
+            break;
+        }
+    }
+    return result;
+}
+
+// Write the data, blocking if needed and if timeoutNanoseconds > 0.
+aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
+                                         aaudio_nanoseconds_t timeoutNanoseconds)
+{
+    aaudio_result_t result = AAUDIO_OK;
+    uint8_t* source = (uint8_t*)buffer;
+    aaudio_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
+    aaudio_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
+//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+
+    // Write until all the data has been written or until a timeout occurs.
+    while (framesLeft > 0) {
+        // The call to writeNow() will not block. It will just write as much as it can.
+        aaudio_nanoseconds_t wakeTimeNanos = 0;
+        aaudio_result_t framesWritten = writeNow(source, framesLeft,
+                                               currentTimeNanos, &wakeTimeNanos);
+//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+        if (framesWritten < 0) {
+            result = framesWritten;
+            break;
+        }
+        framesLeft -= (int32_t) framesWritten;
+        source += framesWritten * getBytesPerFrame();
+
+        // Should we block?
+        if (timeoutNanoseconds == 0) {
+            break; // don't block
+        } else if (framesLeft > 0) {
+            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+            // clip the wake time to something reasonable
+            if (wakeTimeNanos < currentTimeNanos) {
+                wakeTimeNanos = currentTimeNanos;
+            }
+            if (wakeTimeNanos > deadlineNanos) {
+                // If we time out, just return the framesWritten so far.
+                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+                break;
+            }
+
+            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+            //        (long long) (wakeTimeNanos - currentTimeNanos));
+            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+            currentTimeNanos = AudioClock::getNanoseconds();
+        }
+    }
+
+    // return error or framesWritten
+    return (result < 0) ? result : numFrames - framesLeft;
+}
+
+// Write as much data as we can without blocking.
+aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
+                                         aaudio_nanoseconds_t currentNanoTime, aaudio_nanoseconds_t *wakeTimePtr) {
+    {
+        aaudio_result_t result = processCommands();
+        if (result != AAUDIO_OK) {
+            return result;
+        }
+    }
+
+    if (mAudioEndpoint.isOutputFreeRunning()) {
+        // Update data queue based on the timing model.
+        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
+        // If the read index passed the write index then consider it an underrun.
+        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+            mXRunCount++;
+        }
+    }
+    // TODO otherwise maybe query the endpoint, since the counter is set by the actual reader
+
+    // Write some data to the buffer.
+    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
+    if (framesWritten > 0) {
+        incrementFramesWritten(framesWritten);
+    }
+    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+    //    numFrames, framesWritten);
+
+    // Calculate an ideal time to wake up.
+    if (wakeTimePtr != nullptr && framesWritten >= 0) {
+        // By default wake up a few milliseconds from now.  // TODO review
+        aaudio_nanoseconds_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
+        switch (getState()) {
+            case AAUDIO_STREAM_STATE_OPEN:
+            case AAUDIO_STREAM_STATE_STARTING:
+                if (framesWritten != 0) {
+                    // Don't wait to write more data. Just prime the buffer.
+                    wakeTime = currentNanoTime;
+                }
+                break;
+            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
+                {
+                    uint32_t burstSize = mFramesPerBurst;
+                    if (burstSize < 32) {
+                        burstSize = 32; // TODO review
+                    }
+
+                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
+                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+                }
+                break;
+            default:
+                break;
+        }
+        *wakeTimePtr = wakeTime;
+
+    }
+//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+//         (unsigned long long)currentNanoTime,
+//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
+//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+    return framesWritten;
+}
+
+aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
+                                                      aaudio_stream_state_t *nextState,
+                                                      aaudio_nanoseconds_t timeoutNanoseconds)
+
+{
+//    ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+    // TODO replace this polling with a timed sleep on a futex on the message queue
+    int32_t durationNanos = 5 * AAUDIO_NANOS_PER_MILLISECOND;
+    aaudio_stream_state_t state = getState();
+//    ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    while (state == currentState && timeoutNanoseconds > 0) {
+        // TODO use futex from service message queue
+        if (durationNanos > timeoutNanoseconds) {
+            durationNanos = timeoutNanoseconds;
+        }
+        AudioClock::sleepForNanos(durationNanos);
+        timeoutNanoseconds -= durationNanos;
+
+        result = processCommands();
+        if (result != AAUDIO_OK) {
+            return result;
+        }
+
+        state = getState();
+//        ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    }
+    if (nextState != nullptr) {
+        *nextState = state;
+    }
+    return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
+}
+
+
+void AudioStreamInternal::processTimestamp(uint64_t position, aaudio_nanoseconds_t time) {
+    mClockModel.processTimestamp(position, time);
+}
+
+aaudio_result_t AudioStreamInternal::setBufferSize(aaudio_size_frames_t requestedFrames,
+                                        aaudio_size_frames_t *actualFrames) {
+    return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+}
+
+aaudio_size_frames_t AudioStreamInternal::getBufferSize() const
+{
+    return mAudioEndpoint.getBufferSizeInFrames();
+}
+
+aaudio_size_frames_t AudioStreamInternal::getBufferCapacity() const
+{
+    return mAudioEndpoint.getBufferCapacityInFrames();
+}
+
+aaudio_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+{
+    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+}
+
+aaudio_position_frames_t AudioStreamInternal::getFramesRead()
+{
+    aaudio_position_frames_t framesRead =
+            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+            + mFramesOffsetFromService;
+    // Prevent retrograde motion.
+    if (framesRead < mLastFramesRead) {
+        framesRead = mLastFramesRead;
+    } else {
+        mLastFramesRead = framesRead;
+    }
+    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    return framesRead;
+}
+
+// TODO implement getTimestamp
diff --git a/media/liboboe/src/client/AudioStreamInternal.h b/media/liboboe/src/client/AudioStreamInternal.h
new file mode 100644
index 0000000..666df3a
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIOSTREAMINTERNAL_H
+#define AAUDIO_AUDIOSTREAMINTERNAL_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "client/IsochronousClockModel.h"
+#include "client/AudioEndpoint.h"
+#include "core/AudioStream.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+// A stream that talks to the AAudioService or directly to a HAL.
+class AudioStreamInternal : public AudioStream {
+
+public:
+    AudioStreamInternal();
+    virtual ~AudioStreamInternal();
+
+    // =========== Begin ABSTRACT methods ===========================
+    virtual aaudio_result_t requestStart() override;
+
+    virtual aaudio_result_t requestPause() override;
+
+    virtual aaudio_result_t requestFlush() override;
+
+    virtual aaudio_result_t requestStop() override;
+
+    // TODO use aaudio_clockid_t all the way down to AudioClock
+    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+                                       aaudio_position_frames_t *framePosition,
+                                       aaudio_nanoseconds_t *timeNanoseconds) override;
+
+
+    virtual aaudio_result_t updateState() override;
+    // =========== End ABSTRACT methods ===========================
+
+    virtual aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
+    virtual aaudio_result_t close() override;
+
+    virtual aaudio_result_t write(const void *buffer,
+                             int32_t numFrames,
+                             aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
+                                          aaudio_stream_state_t *nextState,
+                                          aaudio_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+                                        aaudio_size_frames_t *actualFrames) override;
+
+    virtual aaudio_size_frames_t getBufferSize() const override;
+
+    virtual aaudio_size_frames_t getBufferCapacity() const override;
+
+    virtual aaudio_size_frames_t getFramesPerBurst() const override;
+
+    virtual aaudio_position_frames_t getFramesRead() override;
+
+    virtual int32_t getXRunCount() const override {
+        return mXRunCount;
+    }
+
+    virtual aaudio_result_t registerThread() override;
+
+    virtual aaudio_result_t unregisterThread() override;
+
+protected:
+
+    aaudio_result_t processCommands();
+
+    /**
+     * Low-level write that will not block. It just writes as much as it can.
+     *
+     * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+     *
+     * @return the number of frames written or a negative error code.
+     */
+    virtual aaudio_result_t writeNow(const void *buffer,
+                                int32_t numFrames,
+                                aaudio_nanoseconds_t currentTimeNanos,
+                                aaudio_nanoseconds_t *wakeTimePtr);
+
+    void onFlushFromServer();
+
+    aaudio_result_t onEventFromServer(AAudioServiceMessage *message);
+
+    aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+
+private:
+    IsochronousClockModel    mClockModel;
+    AudioEndpoint            mAudioEndpoint;
+    aaudio_handle_t            mServiceStreamHandle;
+    EndpointDescriptor       mEndpointDescriptor;
+    // Offset from underlying frame position.
+    aaudio_position_frames_t   mFramesOffsetFromService = 0;
+    aaudio_position_frames_t   mLastFramesRead = 0;
+    aaudio_size_frames_t       mFramesPerBurst;
+    int32_t                  mXRunCount = 0;
+
+    void processTimestamp(uint64_t position, aaudio_nanoseconds_t time);
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AUDIOSTREAMINTERNAL_H
diff --git a/media/liboboe/src/client/IsochronousClockModel.cpp b/media/liboboe/src/client/IsochronousClockModel.cpp
new file mode 100644
index 0000000..bdb491d
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <aaudio/AAudioDefinitions.h>
+
+#include "IsochronousClockModel.h"
+
+#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
+
+using namespace android;
+using namespace aaudio;
+
+IsochronousClockModel::IsochronousClockModel()
+        : mSampleRate(48000)
+        , mFramesPerBurst(64)
+        , mMaxLatenessInNanos(0)
+        , mMarkerFramePosition(0)
+        , mMarkerNanoTime(0)
+        , mTimestampCount(0)
+        , mState(STATE_STOPPED)
+{
+}
+
+IsochronousClockModel::~IsochronousClockModel() {
+}
+
+void IsochronousClockModel::start(aaudio_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mState = STATE_STARTING;
+}
+
+void IsochronousClockModel::stop(aaudio_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    mState = STATE_STOPPED;
+}
+
+void IsochronousClockModel::processTimestamp(aaudio_position_frames_t framePosition,
+                                             aaudio_nanoseconds_t nanoTime) {
+    int64_t framesDelta = framePosition - mMarkerFramePosition;
+    int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+    if (nanosDelta < 1000) {
+        return;
+    }
+
+//    ALOGI("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+//         (long long)mMarkerFramePosition,
+//         (long long)mMarkerNanoTime);
+//    ALOGI("processTimestamp() - framePosition = %lld at nanoTime %llu",
+//         (long long)framePosition,
+//         (long long)nanoTime);
+
+    int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
+//    ALOGI("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+//         (long long)expectedNanosDelta,
+//         (long long)nanosDelta);
+
+//    ALOGI("processTimestamp() - mSampleRate = %d", mSampleRate);
+//    ALOGI("processTimestamp() - mState = %d", mState);
+    switch (mState) {
+    case STATE_STOPPED:
+        break;
+    case STATE_STARTING:
+        mMarkerFramePosition = framePosition;
+        mMarkerNanoTime = nanoTime;
+        mState = STATE_SYNCING;
+        break;
+    case STATE_SYNCING:
+        // This will handle a burst of rapid consumption in the beginning.
+        if (nanosDelta < expectedNanosDelta) {
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+        } else {
+            ALOGI("processTimestamp() - advance to STATE_RUNNING");
+            mState = STATE_RUNNING;
+        }
+        break;
+    case STATE_RUNNING:
+        if (nanosDelta < expectedNanosDelta) {
+            // Earlier than expected timestamp.
+            // This data is probably more accurate so use it.
+            // or we may be drifting due to a slow HW clock.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+        } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
+            // Later than expected timestamp.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+                 (int) (mMaxLatenessInNanos / 1000));
+        }
+        break;
+    default:
+        break;
+    }
+    ++mTimestampCount;
+}
+
+void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
+    mSampleRate = sampleRate;
+    update();
+}
+
+void IsochronousClockModel::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+    update();
+}
+
+void IsochronousClockModel::update() {
+    int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+    mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+}
+
+aaudio_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
+        aaudio_position_frames_t framesDelta) const {
+    return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
+}
+
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const {
+    return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
+}
+
+aaudio_nanoseconds_t IsochronousClockModel::convertPositionToTime(
+        aaudio_position_frames_t framePosition) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerNanoTime;
+    }
+    aaudio_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+    aaudio_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+    aaudio_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+    aaudio_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+    aaudio_nanoseconds_t time = (aaudio_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+//    ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+//         (unsigned long long)framePosition,
+//         (unsigned long long)time);
+    return time;
+}
+
+aaudio_position_frames_t IsochronousClockModel::convertTimeToPosition(
+        aaudio_nanoseconds_t nanoTime) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerFramePosition;
+    }
+    aaudio_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
+    aaudio_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+    aaudio_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+    aaudio_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+    aaudio_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+//         (unsigned long long)nanoTime,
+//         (unsigned long long)position);
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+//         (long long) framesDelta, mFramesPerBurst);
+    return position;
+}
diff --git a/media/liboboe/src/client/IsochronousClockModel.h b/media/liboboe/src/client/IsochronousClockModel.h
new file mode 100644
index 0000000..b188a3d
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#define AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+namespace aaudio {
+
+/**
+ * Model an isochronous data stream using occasional timestamps as input.
+ * This can be used to predict the position of the stream at a given time.
+ *
+ * This class is not thread safe and should only be called from one thread.
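+ *
+ * A rough usage sketch (illustrative only): configure the model with
+ * setSampleRate() and setFramesPerBurst(), call start() when the stream starts,
+ * feed each service timestamp to processTimestamp(), then
+ * convertTimeToPosition(now) estimates the current stream position and
+ * convertPositionToTime(position) estimates when a position will be reached.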
+ */
+class IsochronousClockModel {
+
+public:
+    IsochronousClockModel();
+    virtual ~IsochronousClockModel();
+
+    void start(aaudio_nanoseconds_t nanoTime);
+    void stop(aaudio_nanoseconds_t nanoTime);
+
+    void processTimestamp(aaudio_position_frames_t framePosition, aaudio_nanoseconds_t nanoTime);
+
+    /**
+     * @param sampleRate rate of the stream in frames per second
+     */
+    void setSampleRate(aaudio_sample_rate_t sampleRate);
+
+    aaudio_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    /**
+     * This must be set accurately in order to track the isochronous stream.
+     *
+     * @param framesPerBurst number of frames that the stream advances at one time.
+     */
+    void setFramesPerBurst(aaudio_size_frames_t framesPerBurst);
+
+    aaudio_size_frames_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
+
+    /**
+     * Calculate an estimated time when the stream will be at that position.
+     *
+     * @param framePosition position of the stream in frames
+     * @return time in nanoseconds
+     */
+    aaudio_nanoseconds_t convertPositionToTime(aaudio_position_frames_t framePosition) const;
+
+    /**
+     * Calculate an estimated position where the stream will be at the specified time.
+     *
+     * @param nanoTime time of interest
+     * @return position in frames
+     */
+    aaudio_position_frames_t convertTimeToPosition(aaudio_nanoseconds_t nanoTime) const;
+
+    /**
+     * @param framesDelta difference in frames
+     * @return duration in nanoseconds
+     */
+    aaudio_nanoseconds_t convertDeltaPositionToTime(aaudio_position_frames_t framesDelta) const;
+
+    /**
+     * @param nanosDelta duration in nanoseconds
+     * @return frames that stream will advance in that time
+     */
+    aaudio_position_frames_t convertDeltaTimeToPosition(aaudio_nanoseconds_t nanosDelta) const;
+
+private:
+    enum clock_model_state_t {
+        STATE_STOPPED,
+        STATE_STARTING,
+        STATE_SYNCING,
+        STATE_RUNNING
+    };
+
+    aaudio_sample_rate_t     mSampleRate;
+    aaudio_size_frames_t     mFramesPerBurst;
+    int32_t                mMaxLatenessInNanos;
+    aaudio_position_frames_t mMarkerFramePosition;
+    aaudio_nanoseconds_t     mMarkerNanoTime;
+    int32_t                mTimestampCount;
+    clock_model_state_t     mState;
+
+    void update();
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_ISOCHRONOUSCLOCKMODEL_H
diff --git a/media/liboboe/src/core/AAudioAudio.cpp b/media/liboboe/src/core/AAudioAudio.cpp
new file mode 100644
index 0000000..c1fa7cf
--- /dev/null
+++ b/media/liboboe/src/core/AAudioAudio.cpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+#include "client/AudioStreamInternal.h"
+#include "HandleTracker.h"
+
+using namespace aaudio;
+
+// This is not the maximum theoretically possible number of handles that the HandleTracker
+// class could support; instead it is the maximum number of handles that we are configuring
+// for our HandleTracker instance (sHandleTracker).
+#define AAUDIO_MAX_HANDLES  64
+
+// Macros for common code that includes a return.
+// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
+#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
+    convertAAudioBuilderToStreamBuilder(builder); \
+    if (streamBuilder == nullptr) { \
+        return AAUDIO_ERROR_INVALID_HANDLE; \
+    }
+
+#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
+    CONVERT_BUILDER_HANDLE_OR_RETURN() \
+    if ((resultPtr) == nullptr) { \
+        return AAUDIO_ERROR_NULL; \
+    }
+
+#define CONVERT_STREAM_HANDLE_OR_RETURN() \
+    convertAAudioStreamToAudioStream(stream); \
+    if (audioStream == nullptr) { \
+        return AAUDIO_ERROR_INVALID_HANDLE; \
+    }
+
+#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
+    CONVERT_STREAM_HANDLE_OR_RETURN(); \
+    if ((resultPtr) == nullptr) { \
+        return AAUDIO_ERROR_NULL; \
+    }
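+
+// These conversion macros are written to follow an assignment at the call site, e.g.
+//     AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+// which expands into the handle conversion plus an early return on an invalid
+// handle or a NULL result pointer.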
+
+// Static data.
+// TODO static constructors are discouraged, alternatives?
+static HandleTracker sHandleTracker(AAUDIO_MAX_HANDLES);
+
+typedef enum
+{
+    AAUDIO_HANDLE_TYPE_STREAM,
+    AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+    AAUDIO_HANDLE_TYPE_COUNT
+} aaudio_handle_type_t;
+static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+
+#define AAUDIO_CASE_ENUM(name) case name: return #name
+
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
+    switch (returnCode) {
+        AAUDIO_CASE_ENUM(AAUDIO_OK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INCOMPATIBLE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_QUERY);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_ORDER);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+    }
+    return "Unrecognized AAudio error.";
+}
+
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
+    switch (state) {
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
+    }
+    return "Unrecognized AAudio state.";
+}
+
+#undef AAUDIO_CASE_ENUM
+
+static AudioStream *convertAAudioStreamToAudioStream(AAudioStream stream)
+{
+    return (AudioStream *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
+                                              (aaudio_handle_t) stream);
+}
+
+static AudioStreamBuilder *convertAAudioBuilderToStreamBuilder(AAudioStreamBuilder builder)
+{
+    return (AudioStreamBuilder *) sHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+                                                     (aaudio_handle_t) builder);
+}
+
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder *builder)
+{
+    ALOGD("AAudio_createStreamBuilder(): check sHandleTracker.isInitialized ()");
+    if (!sHandleTracker.isInitialized()) {
+        return AAUDIO_ERROR_NO_MEMORY;
+    }
+    AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
+    if (audioStreamBuilder == nullptr) {
+        return AAUDIO_ERROR_NO_MEMORY;
+    }
+    ALOGD("AAudio_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
+    // TODO protect the put() with a Mutex
+    AAudioStreamBuilder handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM_BUILDER,
+            audioStreamBuilder);
+    if (handle < 0) {
+        delete audioStreamBuilder;
+        return static_cast<aaudio_result_t>(handle);
+    } else {
+        *builder = handle;
+    }
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder builder,
+                                                     aaudio_device_id_t deviceId)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setDeviceId(deviceId);
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDeviceId(AAudioStreamBuilder builder,
+                                              aaudio_device_id_t *deviceId)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
+    *deviceId = streamBuilder->getDeviceId();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder builder,
+                                              aaudio_sample_rate_t sampleRate)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setSampleRate(sampleRate);
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSampleRate(AAudioStreamBuilder builder,
+                                              aaudio_sample_rate_t *sampleRate)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
+    *sampleRate = streamBuilder->getSampleRate();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder builder,
+                                                   int32_t samplesPerFrame)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setSamplesPerFrame(samplesPerFrame);
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSamplesPerFrame(AAudioStreamBuilder builder,
+                                                   int32_t *samplesPerFrame)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
+    *samplesPerFrame = streamBuilder->getSamplesPerFrame();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setDirection(AAudioStreamBuilder builder,
+                                             aaudio_direction_t direction)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setDirection(direction);
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getDirection(AAudioStreamBuilder builder,
+                                             aaudio_direction_t *direction)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
+    *direction = streamBuilder->getDirection();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setFormat(AAudioStreamBuilder builder,
+                                                   aaudio_audio_format_t format)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    streamBuilder->setFormat(format);
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getFormat(AAudioStreamBuilder builder,
+                                                   aaudio_audio_format_t *format)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
+    *format = streamBuilder->getFormat();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder builder,
+                                                        aaudio_sharing_mode_t sharingMode)
+{
+    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
+    if ((sharingMode < 0) || (sharingMode >= AAUDIO_SHARING_MODE_COUNT)) {
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    } else {
+        streamBuilder->setSharingMode(sharingMode);
+        return AAUDIO_OK;
+    }
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_getSharingMode(AAudioStreamBuilder builder,
+                                                        aaudio_sharing_mode_t *sharingMode)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
+    *sharingMode = streamBuilder->getSharingMode();
+    return AAUDIO_OK;
+}
+
+static aaudio_result_t  AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
+                                              AAudioStream *streamPtr)
+{
+    AudioStream *audioStream = nullptr;
+    aaudio_result_t result = streamBuilder->build(&audioStream);
+    if (result != AAUDIO_OK) {
+        return result;
+    } else {
+        // Create a handle for referencing the object.
+        // TODO protect the put() with a Mutex
+        AAudioStream handle = sHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, audioStream);
+        if (handle < 0) {
+            delete audioStream;
+            return static_cast<aaudio_result_t>(handle);
+        }
+        *streamPtr = handle;
+        return AAUDIO_OK;
+    }
+}
+
+AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder builder,
+                                                     AAudioStream *streamPtr)
+{
+    ALOGD("AAudioStreamBuilder_openStream(): builder = 0x%08X", builder);
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
+    return AAudioInternal_openStream(streamBuilder, streamPtr);
+}
+
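A minimal sketch of how an application is expected to drive the builder API added above: create a builder, configure it, open the stream, then release the builder handle. It assumes AAudio_createStreamBuilder() is declared earlier in this file as the AAudio counterpart of the deleted Oboe_createStreamBuilder(); the sketch is illustrative and not part of the patch.

    // Illustrative sketch only; assumes AAudio_createStreamBuilder() exists.
    #include <aaudio/AAudio.h>

    static aaudio_result_t openOutputStream(AAudioStream *streamOut) {
        AAudioStreamBuilder builder;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) {
            return result;
        }
        // Properties that are not set keep their AAUDIO_UNSPECIFIED defaults.
        AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
        AAudioStreamBuilder_setSampleRate(builder, 48000);
        AAudioStreamBuilder_setSamplesPerFrame(builder, 2);   // stereo
        AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_LEGACY);

        result = AAudioStreamBuilder_openStream(builder, streamOut);

        // The builder is only needed while opening; release its handle afterwards.
        AAudioStreamBuilder_delete(builder);
        return result;
    }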
+AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder builder)
+{
+    AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
+            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM_BUILDER, builder);
+    if (streamBuilder != nullptr) {
+        delete streamBuilder;
+        return AAUDIO_OK;
+    }
+    return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream stream)
+{
+    AudioStream *audioStream = (AudioStream *)
+            sHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM, (aaudio_handle_t)stream);
+    ALOGD("AAudioStream_close(0x%08X), audioStream = %p", stream, audioStream);
+    if (audioStream != nullptr) {
+        audioStream->close();
+        delete audioStream;
+        return AAUDIO_OK;
+    }
+    return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("AAudioStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestStart();
+}
+
+AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("AAudioStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestPause();
+}
+
+AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("AAudioStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestFlush();
+}
+
+AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream stream)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    ALOGD("AAudioStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
+    return audioStream->requestStop();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream stream,
+                                            aaudio_stream_state_t inputState,
+                                            aaudio_stream_state_t *nextState,
+                                            aaudio_nanoseconds_t timeoutNanoseconds)
+{
+
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+}
+
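Because requestStart()/requestPause()/requestFlush()/requestStop() are asynchronous (the AudioStream header notes "Use waitForStateChange() to wait for completion"), a caller normally pairs them with AAudioStream_waitForStateChange(). A sketch of that pattern, with an arbitrary 100 ms timeout:

    // Illustrative sketch: start a stream, then wait until it leaves the STARTING state.
    static aaudio_result_t startAndWait(AAudioStream stream) {
        aaudio_result_t result = AAudioStream_requestStart(stream);
        if (result != AAUDIO_OK) {
            return result;
        }
        aaudio_stream_state_t nextState = AAUDIO_STREAM_STATE_STARTING;
        result = AAudioStream_waitForStateChange(stream,
                                                 AAUDIO_STREAM_STATE_STARTING,
                                                 &nextState,
                                                 100 * AAUDIO_NANOS_PER_MILLISECOND);
        if (result != AAUDIO_OK) {
            return result;
        }
        return (nextState == AAUDIO_STREAM_STATE_STARTED) ? AAUDIO_OK
                                                          : AAUDIO_ERROR_UNEXPECTED_STATE;
    }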
+// ============================================================
+// Stream - non-blocking I/O
+// ============================================================
+
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream stream,
+                               void *buffer,
+                               aaudio_size_frames_t numFrames,
+                               aaudio_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (buffer == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    if (numFrames < 0) {
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    } else if (numFrames == 0) {
+        return 0;
+    }
+
+    aaudio_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
+    // ALOGD("AAudioStream_read(): read returns %d", result);
+
+    return result;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream stream,
+                               const void *buffer,
+                               aaudio_size_frames_t numFrames,
+                               aaudio_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (buffer == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    if (numFrames < 0) {
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    } else if (numFrames == 0) {
+        return 0;
+    }
+
+    aaudio_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
+    // ALOGD("AAudioStream_write(): write returns %d", result);
+
+    return result;
+}
+
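The read()/write() entry points above return either a negative error code or, as the numFrames == 0 early return suggests, the non-negative number of frames actually transferred. Under that assumption, a blocking write loop might look like the following sketch (channel count hard-coded purely for illustration):

    // Illustrative sketch: write an interleaved 16-bit stereo buffer in full.
    #include <stdint.h>

    static aaudio_result_t writeAll(AAudioStream stream,
                                    const int16_t *data,
                                    aaudio_size_frames_t numFrames) {
        const int32_t samplesPerFrame = 2;          // assumed stereo
        while (numFrames > 0) {
            aaudio_result_t framesWritten = AAudioStream_write(
                    stream, data, numFrames, 100 * AAUDIO_NANOS_PER_MILLISECOND);
            if (framesWritten < 0) {
                return framesWritten;               // propagate the error code
            }
            data += framesWritten * samplesPerFrame;
            numFrames -= framesWritten;
        }
        return AAUDIO_OK;
    }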
+// ============================================================
+// Miscellaneous
+// ============================================================
+
+AAUDIO_API aaudio_result_t AAudioStream_createThread(AAudioStream stream,
+                                     aaudio_nanoseconds_t periodNanoseconds,
+                                     aaudio_audio_thread_proc_t *threadProc, void *arg)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->createThread(periodNanoseconds, threadProc, arg);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_joinThread(AAudioStream stream,
+                                   void **returnArg,
+                                   aaudio_nanoseconds_t timeoutNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->joinThread(returnArg, timeoutNanoseconds);
+}
+
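AAudioStream_createThread()/joinThread() let the library create the application's audio thread so it can be registered with, and possibly boosted by, the audio service, as the AudioStream.cpp changes later in this patch explain. A sketch of the application side, with an arbitrary 10 ms period and a deliberately trivial thread procedure:

    // Illustrative sketch: supply a thread procedure and let AAudio run and join it.
    static void *myAudioThreadProc(void *arg) {
        AAudioStream stream = *(AAudioStream *) arg;
        (void) stream;      // real code would loop, writing bursts until told to stop
        return nullptr;
    }

    static aaudio_result_t runAudioThread(AAudioStream *stream) {
        aaudio_result_t result = AAudioStream_createThread(
                *stream, 10 * AAUDIO_NANOS_PER_MILLISECOND, myAudioThreadProc, stream);
        if (result != AAUDIO_OK) {
            return result;
        }
        void *threadResult = nullptr;
        return AAudioStream_joinThread(*stream, &threadResult,
                                       1000 * AAUDIO_NANOS_PER_MILLISECOND);
    }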
+// ============================================================
+// Stream - queries
+// ============================================================
+
+// TODO Use aaudio_clockid_t all the way down through the C++ streams.
+static clockid_t AAudioConvert_fromAAudioClockId(aaudio_clockid_t clockid)
+{
+    clockid_t hostClockId;
+    switch (clockid) {
+        case AAUDIO_CLOCK_MONOTONIC:
+            hostClockId = CLOCK_MONOTONIC;
+            break;
+        case AAUDIO_CLOCK_BOOTTIME:
+            hostClockId = CLOCK_BOOTTIME;
+            break;
+        default:
+            hostClockId = 0; // TODO review
+    }
+    return hostClockId;
+}
+
+aaudio_nanoseconds_t AAudio_getNanoseconds(aaudio_clockid_t clockid)
+{
+    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
+    return AudioClock::getNanoseconds(hostClockId);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSampleRate(AAudioStream stream, aaudio_sample_rate_t *sampleRate)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
+    *sampleRate = audioStream->getSampleRate();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSamplesPerFrame(AAudioStream stream, int32_t *samplesPerFrame)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
+    *samplesPerFrame = audioStream->getSamplesPerFrame();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getState(AAudioStream stream, aaudio_stream_state_t *state)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
+    *state = audioStream->getState();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFormat(AAudioStream stream, aaudio_audio_format_t *format)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
+    *format = audioStream->getFormat();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSize(AAudioStream stream,
+                                                aaudio_size_frames_t requestedFrames,
+                                                aaudio_size_frames_t *actualFrames)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    return audioStream->setBufferSize(requestedFrames, actualFrames);
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getBufferSize(AAudioStream stream, aaudio_size_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getBufferSize();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getDirection(AAudioStream stream, int32_t *direction)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
+    *direction = audioStream->getDirection();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesPerBurst(AAudioStream stream,
+                                                    aaudio_size_frames_t *framesPerBurst)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
+    *framesPerBurst = audioStream->getFramesPerBurst();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getBufferCapacity(AAudioStream stream,
+                                           aaudio_size_frames_t *capacity)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
+    *capacity = audioStream->getBufferCapacity();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getXRunCount(AAudioStream stream, int32_t *xRunCount)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
+    *xRunCount = audioStream->getXRunCount();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getDeviceId(AAudioStream stream,
+                                                 aaudio_device_id_t *deviceId)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
+    *deviceId = audioStream->getDeviceId();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getSharingMode(AAudioStream stream,
+                                                 aaudio_sharing_mode_t *sharingMode)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
+    *sharingMode = audioStream->getSharingMode();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesWritten(AAudioStream stream,
+                                                   aaudio_position_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getFramesWritten();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getFramesRead(AAudioStream stream, aaudio_position_frames_t *frames)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
+    *frames = audioStream->getFramesRead();
+    return AAUDIO_OK;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream stream,
+                                      aaudio_clockid_t clockid,
+                                      aaudio_position_frames_t *framePosition,
+                                      aaudio_nanoseconds_t *timeNanoseconds)
+{
+    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
+    if (framePosition == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    } else if (timeNanoseconds == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    } else if (clockid != AAUDIO_CLOCK_MONOTONIC && clockid != AAUDIO_CLOCK_BOOTTIME) {
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    clockid_t hostClockId = AAudioConvert_fromAAudioClockId(clockid);
+    return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
+}
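One intended use of AAudioStream_getTimestamp() is estimating output latency: extrapolate the last presented frame position to the current time and compare it with the number of frames the application has written. A purely illustrative sketch of that calculation, using only the query functions defined above:

    // Illustrative sketch: rough output-latency estimate from a presentation timestamp.
    static aaudio_result_t estimateLatencyMillis(AAudioStream stream, double *latencyMillis) {
        aaudio_position_frames_t presentedFrames = 0;
        aaudio_nanoseconds_t presentedTimeNanos = 0;
        aaudio_result_t result = AAudioStream_getTimestamp(stream, AAUDIO_CLOCK_MONOTONIC,
                                                           &presentedFrames, &presentedTimeNanos);
        if (result != AAUDIO_OK) return result;

        aaudio_sample_rate_t sampleRate = 0;
        result = AAudioStream_getSampleRate(stream, &sampleRate);
        if (result != AAUDIO_OK) return result;

        aaudio_position_frames_t framesWritten = 0;
        result = AAudioStream_getFramesWritten(stream, &framesWritten);
        if (result != AAUDIO_OK) return result;

        // Extrapolate the presented position to "now", then compare with frames written.
        aaudio_nanoseconds_t now = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
        double framesPresentedNow = presentedFrames
                + ((now - presentedTimeNanos) * (double) sampleRate) / 1.0e9;
        *latencyMillis = (framesWritten - framesPresentedNow) * 1000.0 / sampleRate;
        return AAUDIO_OK;
    }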
diff --git a/media/liboboe/src/core/AudioStream.cpp b/media/liboboe/src/core/AudioStream.cpp
index f154002..77d3cc0 100644
--- a/media/liboboe/src/core/AudioStream.cpp
+++ b/media/liboboe/src/core/AudioStream.cpp
@@ -14,26 +14,27 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <atomic>
 #include <stdint.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AudioClock.h"
 
-using namespace oboe;
+using namespace aaudio;
 
-/*
- * AudioStream
- */
 AudioStream::AudioStream() {
+    // mThread is a pthread_t of unknown size, so zero it with memset().
+    memset(&mThread, 0, sizeof(mThread));
+    setPeriodNanoseconds(0);
 }
 
-oboe_result_t AudioStream::open(const AudioStreamBuilder& builder)
+aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
     // TODO validate parameters.
     // Copy parameters from the Builder because the Builder may be deleted after this call.
@@ -42,41 +43,41 @@
     mDeviceId = builder.getDeviceId();
     mFormat = builder.getFormat();
     mSharingMode = builder.getSharingMode();
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
 AudioStream::~AudioStream() {
     close();
 }
 
-oboe_result_t AudioStream::waitForStateTransition(oboe_stream_state_t startingState,
-                                               oboe_stream_state_t endingState,
-                                               oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::waitForStateTransition(aaudio_stream_state_t startingState,
+                                               aaudio_stream_state_t endingState,
+                                               aaudio_nanoseconds_t timeoutNanoseconds)
 {
-    oboe_stream_state_t state = getState();
-    oboe_stream_state_t nextState = state;
+    aaudio_stream_state_t state = getState();
+    aaudio_stream_state_t nextState = state;
     if (state == startingState && state != endingState) {
-        oboe_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
-        if (result != OBOE_OK) {
+        aaudio_result_t result = waitForStateChange(state, &nextState, timeoutNanoseconds);
+        if (result != AAUDIO_OK) {
             return result;
         }
     }
 // It's OK if the expected transition has already occurred.
 // But if we reach an unexpected state then that is an error.
     if (nextState != endingState) {
-        return OBOE_ERROR_UNEXPECTED_STATE;
+        return AAUDIO_ERROR_UNEXPECTED_STATE;
     } else {
-        return OBOE_OK;
+        return AAUDIO_OK;
     }
 }
 
-oboe_result_t AudioStream::waitForStateChange(oboe_stream_state_t currentState,
-                                                oboe_stream_state_t *nextState,
-                                                oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
+                                                aaudio_stream_state_t *nextState,
+                                                aaudio_nanoseconds_t timeoutNanoseconds)
 {
     // TODO replace this when similar functionality added to AudioTrack.cpp
-    oboe_nanoseconds_t durationNanos = 20 * OBOE_NANOS_PER_MILLISECOND;
-    oboe_stream_state_t state = getState();
+    aaudio_nanoseconds_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+    aaudio_stream_state_t state = getState();
     while (state == currentState && timeoutNanoseconds > 0) {
         if (durationNanos > timeoutNanoseconds) {
             durationNanos = timeoutNanoseconds;
@@ -84,41 +85,69 @@
         AudioClock::sleepForNanos(durationNanos);
         timeoutNanoseconds -= durationNanos;
 
-        oboe_result_t result = updateState();
-        if (result != OBOE_OK) {
+        aaudio_result_t result = updateState();
+        if (result != AAUDIO_OK) {
             return result;
         }
 
         state = getState();
     }
-    if (nextState != NULL) {
+    if (nextState != nullptr) {
         *nextState = state;
     }
-    return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
+    return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
 }
 
-oboe_result_t AudioStream::createThread(oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg)
+// This registers the app's background audio thread with the server before
+// passing control to the app. This gives the server an opportunity to boost
+// the thread's performance characteristics.
+void* AudioStream::wrapUserThread() {
+    void* procResult = nullptr;
+    mThreadRegistrationResult = registerThread();
+    if (mThreadRegistrationResult == AAUDIO_OK) {
+        // Call application procedure. This may take a very long time.
+        procResult = mThreadProc(mThreadArg);
+        ALOGD("AudioStream::mThreadProc() returned");
+        mThreadRegistrationResult = unregisterThread();
+    }
+    return procResult;
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void* AudioStream_internalThreadProc(void* threadArg) {
+    AudioStream *audioStream = (AudioStream *) threadArg;
+    return audioStream->wrapUserThread();
+}
+
+aaudio_result_t AudioStream::createThread(aaudio_nanoseconds_t periodNanoseconds,
+                                     aaudio_audio_thread_proc_t *threadProc,
+                                     void* threadArg)
 {
     if (mHasThread) {
-        return OBOE_ERROR_INVALID_STATE;
+        return AAUDIO_ERROR_INVALID_STATE;
     }
-    if (startRoutine == NULL) {
-        return OBOE_ERROR_NULL;
+    if (threadProc == nullptr) {
+        return AAUDIO_ERROR_NULL;
     }
-    int err = pthread_create(&mThread, NULL, startRoutine, arg);
+    // Pass input parameters to the background thread.
+    mThreadProc = threadProc;
+    mThreadArg = threadArg;
+    setPeriodNanoseconds(periodNanoseconds);
+    int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
     if (err != 0) {
-        return OBOE_ERROR_INTERNAL;
+        // TODO convert errno to aaudio_result_t
+        return AAUDIO_ERROR_INTERNAL;
     } else {
         mHasThread = true;
-        return OBOE_OK;
+        return AAUDIO_OK;
     }
 }
 
-oboe_result_t AudioStream::joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, aaudio_nanoseconds_t timeoutNanoseconds)
 {
     if (!mHasThread) {
-        return OBOE_ERROR_INVALID_STATE;
+        return AAUDIO_ERROR_INVALID_STATE;
     }
 #if 0
     // TODO implement equivalent of pthread_timedjoin_np()
@@ -128,7 +157,7 @@
     int err = pthread_join(mThread, returnArg);
 #endif
     mHasThread = false;
-    // TODO Just leaked a thread?
-    return err ? OBOE_ERROR_INTERNAL : OBOE_OK;
+    // TODO convert errno to aaudio_result_t
+    return err ? AAUDIO_ERROR_INTERNAL : mThreadRegistrationResult;
 }
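The createThread() change above relies on a common trampoline idiom: pthread_create() only accepts a plain 'C' function, so a file-static function receives the object through the void* argument and forwards to a member call (here AudioStream_internalThreadProc() forwarding to wrapUserThread()). A distilled, stand-alone illustration of the idiom, not the real class:

    // Stand-alone illustration of the pthread trampoline pattern.
    #include <pthread.h>

    class Worker {
    public:
        int start();
        void *run() {
            return nullptr;     // real work, with full access to member state, goes here
        }
    private:
        pthread_t mThread{};
    };

    // The 'C'-style entry point: recover the object from the void* and call into C++.
    static void *Worker_trampoline(void *arg) {
        return static_cast<Worker *>(arg)->run();
    }

    int Worker::start() {
        return pthread_create(&mThread, nullptr, Worker_trampoline, this);
    }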
 
diff --git a/media/liboboe/src/core/AudioStream.h b/media/liboboe/src/core/AudioStream.h
index 8cbb091..8e4aa05 100644
--- a/media/liboboe/src/core/AudioStream.h
+++ b/media/liboboe/src/core/AudioStream.h
@@ -14,21 +14,23 @@
  * limitations under the License.
  */
 
-#ifndef OBOE_AUDIOSTREAM_H
-#define OBOE_AUDIOSTREAM_H
+#ifndef AAUDIO_AUDIOSTREAM_H
+#define AAUDIO_AUDIOSTREAM_H
 
-#include <unistd.h>
-#include <sys/types.h>
-#include <oboe/OboeAudio.h>
-#include "OboeUtilities.h"
+#include <atomic>
+#include <stdint.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "AAudioUtilities.h"
 #include "MonotonicCounter.h"
 
-namespace oboe {
+namespace aaudio {
 
 class AudioStreamBuilder;
 
 /**
- * Oboe audio stream.
+ * AAudio audio stream.
  */
 class AudioStream {
 public:
@@ -43,155 +45,170 @@
     /* Asynchronous requests.
      * Use waitForStateChange() to wait for completion.
      */
-    virtual oboe_result_t requestStart() = 0;
-    virtual oboe_result_t requestPause() = 0;
-    virtual oboe_result_t requestFlush() = 0;
-    virtual oboe_result_t requestStop() = 0;
+    virtual aaudio_result_t requestStart() = 0;
+    virtual aaudio_result_t requestPause() = 0;
+    virtual aaudio_result_t requestFlush() = 0;
+    virtual aaudio_result_t requestStop() = 0;
 
-    // TODO use oboe_clockid_t all the way down to AudioClock
-    virtual oboe_result_t getTimestamp(clockid_t clockId,
-                                       oboe_position_frames_t *framePosition,
-                                       oboe_nanoseconds_t *timeNanoseconds) = 0;
+    // TODO use aaudio_clockid_t all the way down to AudioClock
+    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+                                       aaudio_position_frames_t *framePosition,
+                                       aaudio_nanoseconds_t *timeNanoseconds) = 0;
 
 
-    virtual oboe_result_t updateState() = 0;
+    virtual aaudio_result_t updateState() = 0;
 
 
     // =========== End ABSTRACT methods ===========================
 
-    virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
-                                          oboe_stream_state_t *nextState,
-                                          oboe_nanoseconds_t timeoutNanoseconds);
+    virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
+                                          aaudio_stream_state_t *nextState,
+                                          aaudio_nanoseconds_t timeoutNanoseconds);
 
     /**
      * Open the stream using the parameters in the builder.
      * Allocate the necessary resources.
      */
-    virtual oboe_result_t open(const AudioStreamBuilder& builder);
+    virtual aaudio_result_t open(const AudioStreamBuilder& builder);
 
     /**
      * Close the stream and deallocate any resources from the open() call.
      * It is safe to call close() multiple times.
      */
-    virtual oboe_result_t close() {
-        return OBOE_OK;
+    virtual aaudio_result_t close() {
+        return AAUDIO_OK;
     }
 
-    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
-                                        oboe_size_frames_t *actualFrames) {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+                                        aaudio_size_frames_t *actualFrames) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual oboe_result_t createThread(oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*start_routine)(void *), void *arg);
+    virtual aaudio_result_t createThread(aaudio_nanoseconds_t periodNanoseconds,
+                                       aaudio_audio_thread_proc_t *threadProc,
+                                       void *threadArg);
 
-    virtual oboe_result_t joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds);
+    virtual aaudio_result_t joinThread(void **returnArg, aaudio_nanoseconds_t timeoutNanoseconds);
+
+    virtual aaudio_result_t registerThread() {
+        return AAUDIO_OK;
+    }
+
+    virtual aaudio_result_t unregisterThread() {
+        return AAUDIO_OK;
+    }
+
+    /**
+     * Internal function used to call the audio thread procedure supplied by the user.
+     * It is unfortunately public because it needs to be called by a static 'C' function.
+     */
+    void* wrapUserThread();
 
     // ============== Queries ===========================
 
-    virtual oboe_stream_state_t getState() const {
+    virtual aaudio_stream_state_t getState() const {
         return mState;
     }
 
-    virtual oboe_size_frames_t getBufferSize() const {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_size_frames_t getBufferSize() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual oboe_size_frames_t getBufferCapacity() const {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_size_frames_t getBufferCapacity() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual oboe_size_frames_t getFramesPerBurst() const {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_size_frames_t getFramesPerBurst() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
     virtual int32_t getXRunCount() const {
-        return OBOE_ERROR_UNIMPLEMENTED;
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
     bool isPlaying() const {
-        return mState == OBOE_STREAM_STATE_STARTING || mState == OBOE_STREAM_STATE_STARTED;
+        return mState == AAUDIO_STREAM_STATE_STARTING || mState == AAUDIO_STREAM_STATE_STARTED;
     }
 
-    oboe_result_t getSampleRate() const {
+    aaudio_result_t getSampleRate() const {
         return mSampleRate;
     }
 
-    oboe_audio_format_t getFormat()  const {
+    aaudio_audio_format_t getFormat()  const {
         return mFormat;
     }
 
-    oboe_result_t getSamplesPerFrame() const {
+    aaudio_result_t getSamplesPerFrame() const {
         return mSamplesPerFrame;
     }
 
-    OboeDeviceId getDeviceId() const {
+    aaudio_device_id_t getDeviceId() const {
         return mDeviceId;
     }
 
-    oboe_sharing_mode_t getSharingMode() const {
+    aaudio_sharing_mode_t getSharingMode() const {
         return mSharingMode;
     }
 
-    oboe_direction_t getDirection() const {
+    aaudio_direction_t getDirection() const {
         return mDirection;
     }
 
-    oboe_size_bytes_t getBytesPerFrame() const {
+    aaudio_size_bytes_t getBytesPerFrame() const {
         return mSamplesPerFrame * getBytesPerSample();
     }
 
-    oboe_size_bytes_t getBytesPerSample() const {
-        return OboeConvert_formatToSizeInBytes(mFormat);
+    aaudio_size_bytes_t getBytesPerSample() const {
+        return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual oboe_position_frames_t getFramesWritten() {
+    virtual aaudio_position_frames_t getFramesWritten() {
         return mFramesWritten.get();
     }
 
-    virtual oboe_position_frames_t getFramesRead() {
+    virtual aaudio_position_frames_t getFramesRead() {
         return mFramesRead.get();
     }
 
 
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
-    virtual oboe_result_t write(const void *buffer,
-                             oboe_size_frames_t numFrames,
-                             oboe_nanoseconds_t timeoutNanoseconds) {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_result_t write(const void *buffer,
+                             aaudio_size_frames_t numFrames,
+                             aaudio_nanoseconds_t timeoutNanoseconds) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    virtual oboe_result_t read(void *buffer,
-                            oboe_size_frames_t numFrames,
-                            oboe_nanoseconds_t timeoutNanoseconds) {
-        return OBOE_ERROR_UNIMPLEMENTED;
+    virtual aaudio_result_t read(void *buffer,
+                            aaudio_size_frames_t numFrames,
+                            aaudio_nanoseconds_t timeoutNanoseconds) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
 protected:
 
-    virtual oboe_position_frames_t incrementFramesWritten(oboe_size_frames_t frames) {
-        return static_cast<oboe_position_frames_t>(mFramesWritten.increment(frames));
+    virtual aaudio_position_frames_t incrementFramesWritten(aaudio_size_frames_t frames) {
+        return static_cast<aaudio_position_frames_t>(mFramesWritten.increment(frames));
     }
 
-    virtual oboe_position_frames_t incrementFramesRead(oboe_size_frames_t frames) {
-        return static_cast<oboe_position_frames_t>(mFramesRead.increment(frames));
+    virtual aaudio_position_frames_t incrementFramesRead(aaudio_size_frames_t frames) {
+        return static_cast<aaudio_position_frames_t>(mFramesRead.increment(frames));
     }
 
     /**
      * Wait for a transition from one state to another.
-     * @return OBOE_OK if the endingState was observed, or OBOE_ERROR_UNEXPECTED_STATE
+     * @return AAUDIO_OK if the endingState was observed, or AAUDIO_ERROR_UNEXPECTED_STATE
      *   if any state that was not the startingState or endingState was observed
-     *   or OBOE_ERROR_TIMEOUT
+     *   or AAUDIO_ERROR_TIMEOUT
      */
-    virtual oboe_result_t waitForStateTransition(oboe_stream_state_t startingState,
-                                              oboe_stream_state_t endingState,
-                                              oboe_nanoseconds_t timeoutNanoseconds);
+    virtual aaudio_result_t waitForStateTransition(aaudio_stream_state_t startingState,
+                                              aaudio_stream_state_t endingState,
+                                              aaudio_nanoseconds_t timeoutNanoseconds);
 
     /**
      * This should not be called after the open() call.
      */
-    void setSampleRate(oboe_sample_rate_t sampleRate) {
+    void setSampleRate(aaudio_sample_rate_t sampleRate) {
         mSampleRate = sampleRate;
     }
 
@@ -205,38 +222,59 @@
     /**
      * This should not be called after the open() call.
      */
-    void setSharingMode(oboe_sharing_mode_t sharingMode) {
+    void setSharingMode(aaudio_sharing_mode_t sharingMode) {
         mSharingMode = sharingMode;
     }
 
     /**
      * This should not be called after the open() call.
      */
-    void setFormat(oboe_audio_format_t format) {
+    void setFormat(aaudio_audio_format_t format) {
         mFormat = format;
     }
 
-    void setState(oboe_stream_state_t state) {
+    void setState(aaudio_stream_state_t state) {
         mState = state;
     }
 
+
+
+protected:
     MonotonicCounter     mFramesWritten;
     MonotonicCounter     mFramesRead;
 
+    void setPeriodNanoseconds(aaudio_nanoseconds_t periodNanoseconds) {
+        mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
+    }
+
+    aaudio_nanoseconds_t getPeriodNanoseconds() {
+        return mPeriodNanoseconds.load(std::memory_order_acquire);
+    }
+
 private:
     // These do not change after open().
-    int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
-    oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
-    oboe_stream_state_t  mState = OBOE_STREAM_STATE_UNINITIALIZED;
-    OboeDeviceId         mDeviceId = OBOE_UNSPECIFIED;
-    oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
-    oboe_audio_format_t  mFormat = OBOE_UNSPECIFIED;
-    oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
+    int32_t              mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
+    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    aaudio_device_id_t     mDeviceId = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+    aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
 
+    // background thread ----------------------------------
     bool                 mHasThread = false;
-    pthread_t            mThread;
+    pthread_t            mThread; // initialized in constructor
+
+    // These are set by the application thread and then read by the audio pthread.
+    std::atomic<aaudio_nanoseconds_t>  mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    // TODO make atomic?
+    aaudio_audio_thread_proc_t* mThreadProc = nullptr;
+    void*                mThreadArg = nullptr;
+    aaudio_result_t        mThreadRegistrationResult = AAUDIO_OK;
+
+
 };
 
-} /* namespace oboe */
+} /* namespace aaudio */
 
-#endif /* OBOE_AUDIOSTREAM_H */
+#endif /* AAUDIO_AUDIOSTREAM_H */
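The new mPeriodNanoseconds member is a small release/acquire handoff: the application thread stores the period via setPeriodNanoseconds() before the audio thread starts, and the audio thread later loads it, with the memory ordering guaranteeing that writes made before the store are visible after the load. A stand-alone sketch of the same pattern (the real members live inside AudioStream):

    // Stand-alone sketch of the release/acquire publication pattern.
    #include <atomic>
    #include <cstdint>

    static std::atomic<int64_t> gPeriodNanos{0};

    void publishPeriod(int64_t periodNanos) {   // called from the application thread
        gPeriodNanos.store(periodNanos, std::memory_order_release);
    }

    int64_t observePeriod() {                   // called from the audio thread
        return gPeriodNanos.load(std::memory_order_acquire);
    }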
diff --git a/media/liboboe/src/core/AudioStreamBuilder.cpp b/media/liboboe/src/core/AudioStreamBuilder.cpp
index 56e6706..decd53c 100644
--- a/media/liboboe/src/core/AudioStreamBuilder.cpp
+++ b/media/liboboe/src/core/AudioStreamBuilder.cpp
@@ -14,17 +14,23 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <sys/types.h>
-#include "AudioStream.h"
-#include "AudioStreamBuilder.h"
-#include "AudioStreamRecord.h"
-#include "AudioStreamTrack.h"
+#include <new>
+#include <stdint.h>
 
-using namespace oboe;
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#include "client/AudioStreamInternal.h"
+#include "core/AudioStream.h"
+#include "core/AudioStreamBuilder.h"
+#include "legacy/AudioStreamRecord.h"
+#include "legacy/AudioStreamTrack.h"
+
+using namespace aaudio;
 
 /*
  * AudioStreamBuilder
@@ -35,47 +41,50 @@
 AudioStreamBuilder::~AudioStreamBuilder() {
 }
 
-oboe_result_t AudioStreamBuilder::build(AudioStream **streamPtr) {
+aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     // TODO Is there a better place to put the code that decides which class to use?
-    AudioStream *audioStream = nullptr;
-    const oboe_sharing_mode_t sharingMode = getSharingMode();
+    AudioStream* audioStream = nullptr;
+    const aaudio_sharing_mode_t sharingMode = getSharingMode();
     switch (getDirection()) {
-    case OBOE_DIRECTION_INPUT:
+    case AAUDIO_DIRECTION_INPUT:
         switch (sharingMode) {
-            case OBOE_SHARING_MODE_LEGACY:
-                audioStream = new AudioStreamRecord();
+            case AAUDIO_SHARING_MODE_LEGACY:
+                audioStream = new(std::nothrow) AudioStreamRecord();
                 break;
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
-                return OBOE_ERROR_ILLEGAL_ARGUMENT;
+                return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
                 break;
         }
         break;
-    case OBOE_DIRECTION_OUTPUT:
+    case AAUDIO_DIRECTION_OUTPUT:
         switch (sharingMode) {
-            case OBOE_SHARING_MODE_LEGACY:
-                audioStream = new AudioStreamTrack();
+            case AAUDIO_SHARING_MODE_LEGACY:
+                audioStream = new(std::nothrow) AudioStreamTrack();
+                break;
+            case AAUDIO_SHARING_MODE_EXCLUSIVE:
+                audioStream = new(std::nothrow) AudioStreamInternal();
                 break;
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
-                return OBOE_ERROR_ILLEGAL_ARGUMENT;
+                return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
                 break;
         }
         break;
     default:
         ALOGE("AudioStreamBuilder(): bad direction = %d", getDirection());
-        return OBOE_ERROR_ILLEGAL_ARGUMENT;
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
         break;
     }
     if (audioStream == nullptr) {
-        return OBOE_ERROR_NO_MEMORY;
+        return AAUDIO_ERROR_NO_MEMORY;
     }
     ALOGD("AudioStreamBuilder(): created audioStream = %p", audioStream);
 
     // TODO maybe move this out of build and pass the builder to the constructors
     // Open the stream using the parameters from the builder.
-    const oboe_result_t result = audioStream->open(*this);
-    if (result != OBOE_OK) {
+    const aaudio_result_t result = audioStream->open(*this);
+    if (result != AAUDIO_OK) {
         delete audioStream;
     } else {
         *streamPtr = audioStream;
diff --git a/media/liboboe/src/core/AudioStreamBuilder.h b/media/liboboe/src/core/AudioStreamBuilder.h
index 3f98ebb..9e1a1a7 100644
--- a/media/liboboe/src/core/AudioStreamBuilder.h
+++ b/media/liboboe/src/core/AudioStreamBuilder.h
@@ -14,13 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef OBOE_AUDIOSTREAMBUILDER_H
-#define OBOE_AUDIOSTREAMBUILDER_H
+#ifndef AAUDIO_AUDIOSTREAMBUILDER_H
+#define AAUDIO_AUDIOSTREAMBUILDER_H
 
-#include <oboe/OboeAudio.h>
+#include <stdint.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
 #include "AudioStream.h"
 
-namespace oboe {
+namespace aaudio {
 
 /**
  * Factory class for an AudioStream.
@@ -38,67 +42,67 @@
     /**
      * This is also known as channelCount.
      */
-    AudioStreamBuilder *setSamplesPerFrame(int samplesPerFrame) {
+    AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
         mSamplesPerFrame = samplesPerFrame;
         return this;
     }
 
-    oboe_direction_t getDirection() const {
+    aaudio_direction_t getDirection() const {
         return mDirection;
     }
 
-    AudioStreamBuilder *setDirection(oboe_direction_t direction) {
+    AudioStreamBuilder* setDirection(aaudio_direction_t direction) {
         mDirection = direction;
         return this;
     }
 
-    oboe_sample_rate_t getSampleRate() const {
+    aaudio_sample_rate_t getSampleRate() const {
         return mSampleRate;
     }
 
-    AudioStreamBuilder *setSampleRate(oboe_sample_rate_t sampleRate) {
+    AudioStreamBuilder* setSampleRate(aaudio_sample_rate_t sampleRate) {
         mSampleRate = sampleRate;
         return this;
     }
 
-    oboe_audio_format_t getFormat() const {
+    aaudio_audio_format_t getFormat() const {
         return mFormat;
     }
 
-    AudioStreamBuilder *setFormat(oboe_audio_format_t format) {
+    AudioStreamBuilder *setFormat(aaudio_audio_format_t format) {
         mFormat = format;
         return this;
     }
 
-    oboe_sharing_mode_t getSharingMode() const {
+    aaudio_sharing_mode_t getSharingMode() const {
         return mSharingMode;
     }
 
-    AudioStreamBuilder *setSharingMode(oboe_sharing_mode_t sharingMode) {
+    AudioStreamBuilder* setSharingMode(aaudio_sharing_mode_t sharingMode) {
         mSharingMode = sharingMode;
         return this;
     }
 
-    OboeDeviceId getDeviceId() const {
+    aaudio_device_id_t getDeviceId() const {
         return mDeviceId;
     }
 
-    AudioStreamBuilder *setDeviceId(OboeDeviceId deviceId) {
+    AudioStreamBuilder* setDeviceId(aaudio_device_id_t deviceId) {
         mDeviceId = deviceId;
         return this;
     }
 
-    oboe_result_t build(AudioStream **streamPtr);
+    aaudio_result_t build(AudioStream **streamPtr);
 
 private:
-    int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
-    oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
-    OboeDeviceId         mDeviceId = OBOE_UNSPECIFIED; // TODO need better default
-    oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
-    oboe_audio_format_t  mFormat = OBOE_UNSPECIFIED;
-    oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
+    int32_t              mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    aaudio_sample_rate_t   mSampleRate = AAUDIO_UNSPECIFIED;
+    aaudio_device_id_t     mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+    aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_LEGACY;
+    aaudio_audio_format_t  mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_direction_t     mDirection = AAUDIO_DIRECTION_OUTPUT;
 };
 
-} /* namespace oboe */
+} /* namespace aaudio */
 
-#endif /* OBOE_AUDIOSTREAMBUILDER_H */
+#endif /* AAUDIO_AUDIOSTREAMBUILDER_H */
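Because every setter returns "this", the internal builder can be configured fluently before build() is called. A sketch of such usage from inside liboboe, assuming the library's include paths; illustrative only:

    // Illustrative internal usage of the fluent AudioStreamBuilder.
    #include <aaudio/AAudio.h>
    #include "core/AudioStreamBuilder.h"

    static aaudio_result_t buildOutputStream(aaudio::AudioStream **streamOut) {
        aaudio::AudioStreamBuilder builder;
        builder.setDirection(AAUDIO_DIRECTION_OUTPUT)
               ->setSampleRate(48000)
               ->setSamplesPerFrame(2)
               ->setSharingMode(AAUDIO_SHARING_MODE_LEGACY);
        return builder.build(streamOut);
    }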
diff --git a/media/liboboe/src/core/OboeAudio.cpp b/media/liboboe/src/core/OboeAudio.cpp
deleted file mode 100644
index a02f226..0000000
--- a/media/liboboe/src/core/OboeAudio.cpp
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <time.h>
-#include <pthread.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "AudioStreamBuilder.h"
-#include "AudioStream.h"
-#include "AudioClock.h"
-#include "HandleTracker.h"
-
-// temporary, as I stage in the MMAP/NOIRQ support, do not review
-#ifndef OBOE_SUPPORT_MMAP
-#define OBOE_SUPPORT_MMAP 0
-#endif
-
-#if OBOE_SUPPORT_MMAP
-#include "AudioStreamInternal.h"
-#include "OboeServiceGateway.h"
-#endif
-
-using namespace oboe;
-
-// This is not the maximum theoretic possible number of handles that the HandlerTracker
-// class could support; instead it is the maximum number of handles that we are configuring
-// for our HandleTracker instance (sHandleTracker).
-#define OBOE_MAX_HANDLES  64
-
-// Macros for common code that includes a return.
-// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
-#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
-    convertOboeBuilderToStreamBuilder(builder); \
-    if (streamBuilder == nullptr) { \
-        return OBOE_ERROR_INVALID_HANDLE; \
-    }
-
-#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
-    CONVERT_BUILDER_HANDLE_OR_RETURN() \
-    if ((resultPtr) == nullptr) { \
-        return OBOE_ERROR_NULL; \
-    }
-
-#define CONVERT_STREAM_HANDLE_OR_RETURN() \
-    convertOboeStreamToAudioStream(stream); \
-    if (audioStream == nullptr) { \
-        return OBOE_ERROR_INVALID_HANDLE; \
-    }
-
-#define COMMON_GET_FROM_STREAM_OR_RETURN(resultPtr) \
-    CONVERT_STREAM_HANDLE_OR_RETURN(); \
-    if ((resultPtr) == nullptr) { \
-        return OBOE_ERROR_NULL; \
-    }
-
-static HandleTracker sHandleTracker(OBOE_MAX_HANDLES);
-
-typedef enum
-{
-    OBOE_HANDLE_TYPE_STREAM,
-    OBOE_HANDLE_TYPE_STREAM_BUILDER,
-    OBOE_HANDLE_TYPE_COUNT
-} oboe_handle_type_t;
-static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
-
-#if OBOE_SUPPORT_MMAP
-static OboeServiceGateway sOboeServiceGateway;
-#endif
-
-#define OBOE_CASE_ENUM(name) case name: return #name
-
-OBOE_API const char * Oboe_convertResultToText(oboe_result_t returnCode) {
-    switch (returnCode) {
-        OBOE_CASE_ENUM(OBOE_OK);
-        OBOE_CASE_ENUM(OBOE_ERROR_ILLEGAL_ARGUMENT);
-        OBOE_CASE_ENUM(OBOE_ERROR_INCOMPATIBLE);
-        OBOE_CASE_ENUM(OBOE_ERROR_INTERNAL);
-        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_STATE);
-        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_HANDLE);
-        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_QUERY);
-        OBOE_CASE_ENUM(OBOE_ERROR_UNIMPLEMENTED);
-        OBOE_CASE_ENUM(OBOE_ERROR_UNAVAILABLE);
-        OBOE_CASE_ENUM(OBOE_ERROR_NO_FREE_HANDLES);
-        OBOE_CASE_ENUM(OBOE_ERROR_NO_MEMORY);
-        OBOE_CASE_ENUM(OBOE_ERROR_NULL);
-        OBOE_CASE_ENUM(OBOE_ERROR_TIMEOUT);
-        OBOE_CASE_ENUM(OBOE_ERROR_WOULD_BLOCK);
-        OBOE_CASE_ENUM(OBOE_ERROR_INVALID_ORDER);
-        OBOE_CASE_ENUM(OBOE_ERROR_OUT_OF_RANGE);
-    }
-    return "Unrecognized Oboe error.";
-}
-
-OBOE_API const char * Oboe_convertStreamStateToText(oboe_stream_state_t state) {
-    switch (state) {
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_UNINITIALIZED);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_OPEN);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTING);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STARTED);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSING);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_PAUSED);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHING);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_FLUSHED);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPING);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_STOPPED);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSING);
-        OBOE_CASE_ENUM(OBOE_STREAM_STATE_CLOSED);
-    }
-    return "Unrecognized Oboe state.";
-}
-
-#undef OBOE_CASE_ENUM
-
-static AudioStream *convertOboeStreamToAudioStream(OboeStream stream)
-{
-    return (AudioStream *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM,
-                                              (oboe_handle_t) stream);
-}
-
-static AudioStreamBuilder *convertOboeBuilderToStreamBuilder(OboeStreamBuilder builder)
-{
-    return (AudioStreamBuilder *) sHandleTracker.get(OBOE_HANDLE_TYPE_STREAM_BUILDER,
-                                                     (oboe_handle_t) builder);
-}
-
-OBOE_API oboe_result_t Oboe_createStreamBuilder(OboeStreamBuilder *builder)
-{
-    ALOGD("Oboe_createStreamBuilder(): check sHandleTracker.isInitialized ()");
-    if (!sHandleTracker.isInitialized()) {
-        return OBOE_ERROR_NO_MEMORY;
-    }
-    AudioStreamBuilder *audioStreamBuilder =  new AudioStreamBuilder();
-    if (audioStreamBuilder == nullptr) {
-        return OBOE_ERROR_NO_MEMORY;
-    }
-    ALOGD("Oboe_createStreamBuilder(): created AudioStreamBuilder = %p", audioStreamBuilder);
-    // TODO protect the put() with a Mutex
-    OboeStreamBuilder handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM_BUILDER,
-            audioStreamBuilder);
-    if (handle < 0) {
-        delete audioStreamBuilder;
-        return static_cast<oboe_result_t>(handle);
-    } else {
-        *builder = handle;
-    }
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
-                                                     OboeDeviceId deviceId)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    streamBuilder->setDeviceId(deviceId);
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    streamBuilder->setSampleRate(sampleRate);
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t *sampleRate)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sampleRate);
-    *sampleRate = streamBuilder->getSampleRate();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSamplesPerFrame(OboeStreamBuilder builder,
-                                                   int32_t samplesPerFrame)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    streamBuilder->setSamplesPerFrame(samplesPerFrame);
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSamplesPerFrame(OboeStreamBuilder builder,
-                                                   int32_t *samplesPerFrame)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = streamBuilder->getSamplesPerFrame();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setDirection(OboeStreamBuilder builder,
-                                             oboe_direction_t direction)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    streamBuilder->setDirection(direction);
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getDirection(OboeStreamBuilder builder,
-                                             oboe_direction_t *direction)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(direction);
-    *direction = streamBuilder->getDirection();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setFormat(OboeStreamBuilder builder,
-                                                   oboe_audio_format_t format)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    streamBuilder->setFormat(format);
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getFormat(OboeStreamBuilder builder,
-                                                   oboe_audio_format_t *format)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(format);
-    *format = streamBuilder->getFormat();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_setSharingMode(OboeStreamBuilder builder,
-                                                        oboe_sharing_mode_t sharingMode)
-{
-    AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
-    if ((sharingMode < 0) || (sharingMode >= OBOE_SHARING_MODE_COUNT)) {
-        return OBOE_ERROR_ILLEGAL_ARGUMENT;
-    } else {
-        streamBuilder->setSharingMode(sharingMode);
-        return OBOE_OK;
-    }
-}
-
-OBOE_API oboe_result_t OboeStreamBuilder_getSharingMode(OboeStreamBuilder builder,
-                                                        oboe_sharing_mode_t *sharingMode)
-{
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(sharingMode);
-    *sharingMode = streamBuilder->getSharingMode();
-    return OBOE_OK;
-}
-
-static oboe_result_t  OboeInternal_openStream(AudioStreamBuilder *streamBuilder,
-                                              OboeStream *streamPtr)
-{
-    AudioStream *audioStream = nullptr;
-    oboe_result_t result = streamBuilder->build(&audioStream);
-    if (result != OBOE_OK) {
-        return result;
-    } else {
-        // Create a handle for referencing the object.
-        // TODO protect the put() with a Mutex
-        OboeStream handle = sHandleTracker.put(OBOE_HANDLE_TYPE_STREAM, audioStream);
-        if (handle < 0) {
-            delete audioStream;
-            return static_cast<oboe_result_t>(handle);
-        }
-        *streamPtr = handle;
-        return OBOE_OK;
-    }
-}
-
-OBOE_API oboe_result_t  OboeStreamBuilder_openStream(OboeStreamBuilder builder,
-                                                     OboeStream *streamPtr)
-{
-    ALOGD("OboeStreamBuilder_openStream(): builder = 0x%08X", builder);
-    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
-    return OboeInternal_openStream(streamBuilder, streamPtr);
-}
-
-OBOE_API oboe_result_t  OboeStreamBuilder_delete(OboeStreamBuilder builder)
-{
-    // TODO protect the remove() with a Mutex
-    AudioStreamBuilder *streamBuilder = (AudioStreamBuilder *)
-            sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM_BUILDER, builder);
-    if (streamBuilder != nullptr) {
-        delete streamBuilder;
-        return OBOE_OK;
-    }
-    return OBOE_ERROR_INVALID_HANDLE;
-}
-
-OBOE_API oboe_result_t  OboeStream_close(OboeStream stream)
-{
-    // TODO protect the remove() with a Mutex
-    AudioStream *audioStream = (AudioStream *)
-            sHandleTracker.remove(OBOE_HANDLE_TYPE_STREAM, (oboe_handle_t)stream);
-    if (audioStream != nullptr) {
-        audioStream->close();
-        delete audioStream;
-        return OBOE_OK;
-    }
-    return OBOE_ERROR_INVALID_HANDLE;
-}
-
-OBOE_API oboe_result_t  OboeStream_requestStart(OboeStream stream)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("OboeStream_requestStart(0x%08X), audioStream = %p", stream, audioStream);
-    return audioStream->requestStart();
-}
-
-OBOE_API oboe_result_t  OboeStream_requestPause(OboeStream stream)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("OboeStream_requestPause(0x%08X), audioStream = %p", stream, audioStream);
-    return audioStream->requestPause();
-}
-
-OBOE_API oboe_result_t  OboeStream_requestFlush(OboeStream stream)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("OboeStream_requestFlush(0x%08X), audioStream = %p", stream, audioStream);
-    return audioStream->requestFlush();
-}
-
-OBOE_API oboe_result_t  OboeStream_requestStop(OboeStream stream)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    ALOGD("OboeStream_requestStop(0x%08X), audioStream = %p", stream, audioStream);
-    return audioStream->requestStop();
-}
-
-OBOE_API oboe_result_t OboeStream_waitForStateChange(OboeStream stream,
-                                            oboe_stream_state_t inputState,
-                                            oboe_stream_state_t *nextState,
-                                            oboe_nanoseconds_t timeoutNanoseconds)
-{
-
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
-}
-
-// ============================================================
-// Stream - non-blocking I/O
-// ============================================================
-
-OBOE_API oboe_result_t OboeStream_read(OboeStream stream,
-                               void *buffer,
-                               oboe_size_frames_t numFrames,
-                               oboe_nanoseconds_t timeoutNanoseconds)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    if (buffer == nullptr) {
-        return OBOE_ERROR_NULL;
-    }
-    if (numFrames < 0) {
-        return OBOE_ERROR_ILLEGAL_ARGUMENT;
-    } else if (numFrames == 0) {
-        return 0;
-    }
-
-    oboe_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
-    // ALOGD("OboeStream_read(): read returns %d", result);
-
-    return result;
-}
-
-OBOE_API oboe_result_t OboeStream_write(OboeStream stream,
-                               const void *buffer,
-                               oboe_size_frames_t numFrames,
-                               oboe_nanoseconds_t timeoutNanoseconds)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    if (buffer == nullptr) {
-        return OBOE_ERROR_NULL;
-    }
-    if (numFrames < 0) {
-        return OBOE_ERROR_ILLEGAL_ARGUMENT;
-    } else if (numFrames == 0) {
-        return 0;
-    }
-
-    oboe_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
-    // ALOGD("OboeStream_write(): write returns %d", result);
-
-    return result;
-}
-
-// ============================================================
-// Miscellaneous
-// ============================================================
-
-OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
-                                     oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->createThread(periodNanoseconds, startRoutine, arg);
-}
-
-OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
-                                   void **returnArg,
-                                   oboe_nanoseconds_t timeoutNanoseconds)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->joinThread(returnArg, timeoutNanoseconds);
-}
-
-// ============================================================
-// Stream - queries
-// ============================================================
-
-// TODO Use oboe_clockid_t all the way down through the C++ streams.
-static clockid_t OboeConvert_fromOboeClockId(oboe_clockid_t clockid)
-{
-    clockid_t hostClockId;
-    switch (clockid) {
-        case OBOE_CLOCK_MONOTONIC:
-            hostClockId = CLOCK_MONOTONIC;
-            break;
-        case OBOE_CLOCK_BOOTTIME:
-            hostClockId = CLOCK_BOOTTIME;
-            break;
-        default:
-            hostClockId = 0; // TODO review
-    }
-    return hostClockId;
-}
-
-oboe_nanoseconds_t Oboe_getNanoseconds(oboe_clockid_t clockid)
-{
-    clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
-   return AudioClock::getNanoseconds(hostClockId);
-}
-
-OBOE_API oboe_result_t OboeStream_getSampleRate(OboeStream stream, oboe_sample_rate_t *sampleRate)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sampleRate);
-    *sampleRate = audioStream->getSampleRate();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getSamplesPerFrame(OboeStream stream, int32_t *samplesPerFrame)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(samplesPerFrame);
-    *samplesPerFrame = audioStream->getSamplesPerFrame();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getState(OboeStream stream, oboe_stream_state_t *state)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(state);
-    *state = audioStream->getState();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFormat(OboeStream stream, oboe_audio_format_t *format)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(format);
-    *format = audioStream->getFormat();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_setBufferSize(OboeStream stream,
-                                                oboe_size_frames_t requestedFrames,
-                                                oboe_size_frames_t *actualFrames)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->setBufferSize(requestedFrames, actualFrames);
-}
-
-OBOE_API oboe_result_t OboeStream_getBufferSize(OboeStream stream, oboe_size_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getBufferSize();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getDirection(OboeStream stream, int32_t *direction)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(direction);
-    *direction = audioStream->getDirection();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesPerBurst(OboeStream stream,
-                                                    oboe_size_frames_t *framesPerBurst)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(framesPerBurst);
-    *framesPerBurst = audioStream->getFramesPerBurst();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getBufferCapacity(OboeStream stream,
-                                           oboe_size_frames_t *capacity)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(capacity);
-    *capacity = audioStream->getBufferCapacity();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getXRunCount(OboeStream stream, int32_t *xRunCount)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(xRunCount);
-    *xRunCount = audioStream->getXRunCount();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
-                                                 oboe_sharing_mode_t *sharingMode)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(sharingMode);
-    *sharingMode = audioStream->getSharingMode();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesWritten(OboeStream stream,
-                                                   oboe_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesWritten();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getFramesRead(OboeStream stream, oboe_position_frames_t *frames)
-{
-    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(frames);
-    *frames = audioStream->getFramesRead();
-    return OBOE_OK;
-}
-
-OBOE_API oboe_result_t OboeStream_getTimestamp(OboeStream stream,
-                                      oboe_clockid_t clockid,
-                                      oboe_position_frames_t *framePosition,
-                                      oboe_nanoseconds_t *timeNanoseconds)
-{
-    AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    if (framePosition == nullptr) {
-        return OBOE_ERROR_NULL;
-    } else if (timeNanoseconds == nullptr) {
-        return OBOE_ERROR_NULL;
-    } else if (clockid != OBOE_CLOCK_MONOTONIC && clockid != OBOE_CLOCK_BOOTTIME) {
-        return OBOE_ERROR_ILLEGAL_ARGUMENT;
-    }
-
-    clockid_t hostClockId = OboeConvert_fromOboeClockId(clockid);
-    return audioStream->getTimestamp(hostClockId, framePosition, timeNanoseconds);
-}
diff --git a/media/liboboe/src/core/README.md b/media/liboboe/src/core/README.md
index dd99286..5ce41f3 100644
--- a/media/liboboe/src/core/README.md
+++ b/media/liboboe/src/core/README.md
@@ -1,2 +1,2 @@
-The core folder contains the essential Oboe files common to all implementations.
-The OboeAudio.cpp contains the 'C' API.
+The core folder contains the essential AAudio files common to all implementations.
+The AAudioAudio.cpp contains the 'C' API.
diff --git a/media/liboboe/src/fifo/FifoBuffer.cpp b/media/liboboe/src/fifo/FifoBuffer.cpp
new file mode 100644
index 0000000..c5489f1
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <unistd.h>
+
+#define LOG_TAG "FifoBuffer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "FifoControllerBase.h"
+#include "FifoController.h"
+#include "FifoControllerIndirect.h"
+#include "FifoBuffer.h"
+
+FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(nullptr)
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoController(capacityInFrames, capacityInFrames);
+    // allocate buffer
+    int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
+    mStorage = new uint8_t[bytesPerBuffer];
+    mStorageOwned = true;
+    ALOGD("FifoBuffer: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::FifoBuffer( int32_t   bytesPerFrame,
+                        fifo_frames_t   capacityInFrames,
+                        fifo_counter_t *  readIndexAddress,
+                        fifo_counter_t *  writeIndexAddress,
+                        void *  dataStorageAddress
+                        )
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(static_cast<uint8_t *>(dataStorageAddress))
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoControllerIndirect(capacityInFrames,
+                                       capacityInFrames,
+                                       readIndexAddress,
+                                       writeIndexAddress);
+    mStorageOwned = false;
+    ALOGD("FifoProcessor: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::~FifoBuffer() {
+    if (mStorageOwned) {
+        delete[] mStorage;
+    }
+    delete mFifo;
+}
+
+
+int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
+    return frames * mBytesPerFrame;
+}
+
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    size_t numBytes;
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    fifo_frames_t framesToRead = numFrames;
+    // Is there enough data in the FIFO
+    if (framesToRead > framesAvailable) {
+        framesToRead = framesAvailable;
+    }
+    if (framesToRead == 0) {
+        return 0;
+    }
+
+    fifo_frames_t readIndex = mFifo->getReadIndex();
+    uint8_t *destination = (uint8_t *) buffer;
+    uint8_t *source = &mStorage[convertFramesToBytes(readIndex)];
+    if ((readIndex + framesToRead) > mFrameCapacity) {
+        // read in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - readIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+        destination += numBytes;
+        // read second part
+        source = &mStorage[0];
+        fifo_frames_t frames2 = framesToRead - frames1;
+        numBytes = convertFramesToBytes(frames2);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just read in one shot
+        numBytes = convertFramesToBytes(framesToRead);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceReadIndex(framesToRead);
+
+    return framesToRead;
+}
+
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t framesToWrite) {
+    fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
+//    ALOGD("FifoBuffer::write() framesToWrite = %d, framesAvailable = %d",
+//         framesToWrite, framesAvailable);
+    if (framesToWrite > framesAvailable) {
+        framesToWrite = framesAvailable;
+    }
+    if (framesToWrite <= 0) {
+        return 0;
+    }
+
+    size_t numBytes;
+    fifo_frames_t writeIndex = mFifo->getWriteIndex();
+    int byteIndex = convertFramesToBytes(writeIndex);
+    const uint8_t *source = (const uint8_t *) buffer;
+    uint8_t *destination = &mStorage[byteIndex];
+    if ((writeIndex + framesToWrite) > mFrameCapacity) {
+        // write in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - writeIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        // write second part
+        source += convertFramesToBytes(frames1);
+        destination = &mStorage[0];
+        fifo_frames_t framesLeft = framesToWrite - frames1;
+        numBytes = convertFramesToBytes(framesLeft);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just write in one shot
+        numBytes = convertFramesToBytes(framesToWrite);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceWriteIndex(framesToWrite);
+
+    return framesToWrite;
+}
+
+fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
+    mLastReadSize = numFrames;
+    fifo_frames_t framesLeft = numFrames;
+    fifo_frames_t framesRead = read(buffer, numFrames);
+    framesLeft -= framesRead;
+    mFramesReadCount += framesRead;
+    mFramesUnderrunCount += framesLeft;
+    // Zero out any samples we could not set.
+    if (framesLeft > 0) {
+        mUnderrunCount++;
+        int32_t bytesToZero = convertFramesToBytes(framesLeft);
+        memset(buffer, 0, bytesToZero);
+    }
+
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::getThreshold() {
+    return mFifo->getThreshold();
+}
+
+void FifoBuffer::setThreshold(fifo_frames_t threshold) {
+    mFifo->setThreshold(threshold);
+}
+
+fifo_frames_t FifoBuffer::getBufferCapacityInFrames() {
+    return mFifo->getCapacity();
+}
+
diff --git a/media/liboboe/src/fifo/FifoBuffer.h b/media/liboboe/src/fifo/FifoBuffer.h
new file mode 100644
index 0000000..faa9ae2
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_BUFFER_H
+#define FIFO_FIFO_BUFFER_H
+
+#include <stdint.h>
+
+#include "FifoControllerBase.h"
+
+class FifoBuffer {
+public:
+    FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    FifoBuffer(int32_t   bytesPerFrame,
+               fifo_frames_t   capacityInFrames,
+               fifo_counter_t * readCounterAddress,
+               fifo_counter_t * writeCounterAddress,
+               void * dataStorageAddress);
+
+    ~FifoBuffer();
+
+    int32_t convertFramesToBytes(fifo_frames_t frames);
+
+    fifo_frames_t read(void *destination, fifo_frames_t framesToRead);
+
+    fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
+
+    fifo_frames_t getThreshold();
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getBufferCapacityInFrames();
+
+    fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
+
+    int64_t getNextReadTime(int32_t frameRate);
+
+    int32_t getUnderrunCount() const { return mUnderrunCount; }
+
+    FifoControllerBase *getFifoControllerBase() { return mFifo; }
+
+    int32_t getBytesPerFrame() {
+        return mBytesPerFrame;
+    }
+
+    fifo_counter_t getReadCounter() {
+        return mFifo->getReadCounter();
+    }
+
+    void setReadCounter(fifo_counter_t n) {
+        mFifo->setReadCounter(n);
+    }
+
+    fifo_counter_t getWriteCounter() {
+        return mFifo->getWriteCounter();
+    }
+
+    void setWriteCounter(fifo_counter_t n) {
+        mFifo->setWriteCounter(n);
+    }
+
+private:
+    const fifo_frames_t mFrameCapacity;
+    const int32_t       mBytesPerFrame;
+    uint8_t *           mStorage;
+    bool                mStorageOwned; // did this object allocate the storage?
+    FifoControllerBase *mFifo;
+    fifo_counter_t      mFramesReadCount;
+    fifo_counter_t      mFramesUnderrunCount;
+    int32_t             mUnderrunCount; // need? just use frames
+    int32_t             mLastReadSize;
+};
+
+#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/liboboe/src/fifo/FifoController.h b/media/liboboe/src/fifo/FifoController.h
new file mode 100644
index 0000000..7434634
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoController.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_H
+#define FIFO_FIFO_CONTROLLER_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FIFO with counters contained in the class.
+ */
+class FifoController : public FifoControllerBase
+{
+public:
+    FifoController(fifo_counter_t bufferSize, fifo_counter_t threshold)
+    : FifoControllerBase(bufferSize, threshold)
+    , mReadCounter(0)
+    , mWriteCounter(0)
+    {}
+
+    virtual ~FifoController() {}
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounter.load(std::memory_order_acquire);
+    }
+    virtual void setReadCounter(fifo_counter_t n) override {
+        mReadCounter.store(n, std::memory_order_release);
+    }
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounter.load(std::memory_order_acquire);
+    }
+    virtual void setWriteCounter(fifo_counter_t n) override {
+        mWriteCounter.store(n, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> mReadCounter;
+    std::atomic<fifo_counter_t> mWriteCounter;
+};
+
+
+#endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/liboboe/src/fifo/FifoControllerBase.cpp b/media/liboboe/src/fifo/FifoControllerBase.cpp
new file mode 100644
index 0000000..33a253e
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FifoControllerBase"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include "FifoControllerBase.h"
+
+FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
+        : mCapacity(capacity)
+        , mThreshold(threshold)
+{
+}
+
+FifoControllerBase::~FifoControllerBase() {
+}
+
+fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
+    return (fifo_frames_t) (getWriteCounter() - getReadCounter());
+}
+
+fifo_frames_t FifoControllerBase::getReadIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getReadCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceReadIndex(fifo_frames_t numFrames) {
+    setReadCounter(getReadCounter() + numFrames);
+}
+
+fifo_frames_t FifoControllerBase::getEmptyFramesAvailable() {
+    return (int32_t)(mThreshold - getFullFramesAvailable());
+}
+
+fifo_frames_t FifoControllerBase::getWriteIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getWriteCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceWriteIndex(fifo_frames_t numFrames) {
+    setWriteCounter(getWriteCounter() + numFrames);
+}
+
+void FifoControllerBase::setThreshold(fifo_frames_t threshold) {
+    mThreshold = threshold;
+}
diff --git a/media/liboboe/src/fifo/FifoControllerBase.h b/media/liboboe/src/fifo/FifoControllerBase.h
new file mode 100644
index 0000000..c543519
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_BASE_H
+#define FIFO_FIFO_CONTROLLER_BASE_H
+
+#include <stdint.h>
+
+typedef int64_t fifo_counter_t;
+typedef int32_t fifo_frames_t;
+
+/**
+ * Manage the read/write indices of a circular buffer.
+ *
+ * The caller is responsible for reading and writing the actual data.
+ * Note that the span of available frames may not be contiguous. It
+ * may wrap around from the end to the beginning of the buffer. In that
+ * case the data must be read or written in at least two blocks of frames.
+ *
+ */
+class FifoControllerBase {
+
+public:
+    /**
+     * Constructor for FifoControllerBase
+     * @param capacity Total size of the circular buffer in frames.
+     * @param threshold Number of frames to fill. Must be less than capacity.
+     */
+    FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
+
+    virtual ~FifoControllerBase();
+
+    // Abstract methods to be implemented in subclasses.
+    /**
+     * @return Counter used by the reader of the FIFO.
+     */
+    virtual fifo_counter_t getReadCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been read.
+     */
+    virtual void setReadCounter(fifo_counter_t count) = 0;
+
+    /**
+     * @return Counter used by the writer of the FIFO.
+     */
+    virtual fifo_counter_t getWriteCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been written.
+     */
+    virtual void setWriteCounter(fifo_counter_t count) = 0;
+
+    /**
+     * This may be negative if an unthrottled reader has read beyond the available data.
+     * @return number of valid frames available to read. Never read more than this.
+     */
+    fifo_frames_t getFullFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to read.
+     */
+    fifo_frames_t getReadIndex();
+
+    /**
+     * @param numFrames number of frames to advance the read index
+     */
+    void advanceReadIndex(fifo_frames_t numFrames);
+
+    /**
+     * @return number of frames that can be written. Never write more than this.
+     */
+    fifo_frames_t getEmptyFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to write.
+     */
+    fifo_frames_t getWriteIndex();
+
+    /**
+     * @param numFrames number of frames to advance the write index
+     */
+    void advanceWriteIndex(fifo_frames_t numFrames);
+
+    /**
+     * You can request that the buffer not be filled above a maximum
+     * number of frames.
+     * @param threshold effective size of the buffer
+     */
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getThreshold() const {
+        return mThreshold;
+    }
+
+    fifo_frames_t getCapacity() const {
+        return mCapacity;
+    }
+
+
+private:
+    fifo_frames_t mCapacity;
+    fifo_frames_t mThreshold;
+};
+
+#endif // FIFO_FIFO_CONTROLLER_BASE_H
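A worked example of the counter arithmetic described in the class comment above (illustrative only; it uses the FifoController subclass added earlier in this change, and a standalone main() that is not part of the patch): with a capacity of 8 frames, a write counter of 10 and a read counter of 6, there are 4 full frames available, the read index is 6 and the write index is 2, so the readable span wraps and must be handled in two blocks.

// Illustrative sketch only, not part of this change.
#include <cassert>
#include "FifoController.h"

int main() {
    FifoController fifo(8 /* capacity */, 8 /* threshold */);
    fifo.setWriteCounter(10);
    fifo.setReadCounter(6);
    assert(fifo.getFullFramesAvailable() == 4);   // 10 - 6
    assert(fifo.getEmptyFramesAvailable() == 4);  // threshold(8) - full(4)
    assert(fifo.getReadIndex() == 6);             // 6 % 8
    assert(fifo.getWriteIndex() == 2);            // 10 % 8
    // readIndex + available (6 + 4) exceeds the capacity (8), so the data
    // must be copied in two blocks: 2 frames at index 6, then 2 at index 0.
    return 0;
}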
diff --git a/media/liboboe/src/fifo/FifoControllerIndirect.h b/media/liboboe/src/fifo/FifoControllerIndirect.h
new file mode 100644
index 0000000..1aaf9ea
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerIndirect.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_INDIRECT_H
+#define FIFO_FIFO_CONTROLLER_INDIRECT_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FifoControllerBase with counters external to the class.
+ *
+ * The actual counters may be stored in separate regions of shared memory
+ * with different access rights.
+ */
+class FifoControllerIndirect : public FifoControllerBase {
+
+public:
+    FifoControllerIndirect(fifo_frames_t capacity,
+                           fifo_frames_t threshold,
+                           fifo_counter_t * readCounterAddress,
+                           fifo_counter_t * writeCounterAddress)
+        : FifoControllerBase(capacity, threshold)
+        , mReadCounterAddress((std::atomic<fifo_counter_t> *) readCounterAddress)
+        , mWriteCounterAddress((std::atomic<fifo_counter_t> *) writeCounterAddress)
+    {
+        setReadCounter(0);
+        setWriteCounter(0);
+    }
+    virtual ~FifoControllerIndirect() {};
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setReadCounter(fifo_counter_t count) override {
+        mReadCounterAddress->store(count, std::memory_order_release);
+    }
+
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setWriteCounter(fifo_counter_t count) override {
+        mWriteCounterAddress->store(count, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> * mReadCounterAddress;
+    std::atomic<fifo_counter_t> * mWriteCounterAddress;
+};
+
+#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
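To make the external-counter arrangement concrete, here is a minimal sketch of wiring FifoControllerIndirect to caller-owned counters; the SharedFifoState struct and the static variable standing in for a mapped shared-memory region are hypothetical, for illustration only.

// Illustrative sketch only: counters live in caller-owned memory, e.g. a
// shared-memory region mapped into both processes.
#include "FifoControllerIndirect.h"

struct SharedFifoState {            // hypothetical layout of the shared region
    fifo_counter_t readCounter;     // advanced only by the reading side
    fifo_counter_t writeCounter;    // advanced only by the writing side
};

int main() {
    static SharedFifoState state;   // stands in for an mmap()'ed region
    FifoControllerIndirect controller(256 /* capacity */, 256 /* threshold */,
                                      &state.readCounter,
                                      &state.writeCounter);
    controller.advanceWriteIndex(32);                           // writer side
    return controller.getFullFramesAvailable() == 32 ? 0 : 1;   // reader side sees 32
}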
diff --git a/media/liboboe/src/fifo/README.md b/media/liboboe/src/fifo/README.md
new file mode 100644
index 0000000..5d0c471
--- /dev/null
+++ b/media/liboboe/src/fifo/README.md
@@ -0,0 +1,9 @@
+Simple atomic FIFO for passing data between threads or processes.
+This does not require mutexes.
+
+One thread modifies the readCounter and the other thread modifies the writeCounter.
+
+TODO The internal low-level implementation might be merged in some form with audio_utils fifo
+and/or FMQ [after confirming that requirements are met].
+The higher-level parts related to AAudio's use of the FIFO, such as the API, fds, relative
+location of indices and data buffer, mapping, and allocation of memory, will probably be kept as-is.
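A minimal usage sketch of the FifoBuffer API built on these counters (illustrative only; the producer/consumer functions and the float frame format are assumptions, not part of this change):

// Illustrative sketch only: single writer thread, single reader thread,
// no mutex, as described in the README above.
#include <functional>
#include <thread>
#include "FifoBuffer.h"

static void producer(FifoBuffer &fifo) {
    float frames[64] = {0.0f};
    // write() returns how many frames actually fit; a real caller would
    // loop or block on the remainder.
    fifo.write(frames, 64);
}

static void consumer(FifoBuffer &fifo) {
    float frames[64];
    // readNow() zero-fills whatever is not yet available and counts underruns.
    fifo.readNow(frames, 64);
}

int main() {
    FifoBuffer fifo(sizeof(float), 256);   // 1 float per frame, 256-frame capacity
    std::thread writer(producer, std::ref(fifo));
    std::thread reader(consumer, std::ref(fifo));
    writer.join();
    reader.join();
    return 0;
}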
diff --git a/media/liboboe/src/legacy/OboeLegacy.h b/media/liboboe/src/legacy/AAudioLegacy.h
similarity index 83%
rename from media/liboboe/src/legacy/OboeLegacy.h
rename to media/liboboe/src/legacy/AAudioLegacy.h
index 6803837..2ceb7d4 100644
--- a/media/liboboe/src/legacy/OboeLegacy.h
+++ b/media/liboboe/src/legacy/AAudioLegacy.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef OBOE_LEGACY_H
-#define OBOE_LEGACY_H
+#ifndef AAUDIO_LEGACY_H
+#define AAUDIO_LEGACY_H
 
 #include <stdint.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 
 /**
  * Common code for legacy classes.
  */
 
 /* AudioTrack uses a 32-bit frame counter that can wrap around in about a day. */
-typedef uint32_t oboe_wrapping_frames_t;
+typedef uint32_t aaudio_wrapping_frames_t;
 
-#endif /* OBOE_LEGACY_H */
+#endif /* AAUDIO_LEGACY_H */
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.cpp b/media/liboboe/src/legacy/AudioStreamRecord.cpp
index f130cad..2d1785f 100644
--- a/media/liboboe/src/legacy/AudioStreamRecord.cpp
+++ b/media/liboboe/src/legacy/AudioStreamRecord.cpp
@@ -21,13 +21,13 @@
 #include <stdint.h>
 #include <utils/String16.h>
 #include <media/AudioRecord.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioClock.h"
 #include "AudioStreamRecord.h"
 
 using namespace android;
-using namespace oboe;
+using namespace aaudio;
 
 AudioStreamRecord::AudioStreamRecord()
     : AudioStream()
@@ -36,34 +36,34 @@
 
 AudioStreamRecord::~AudioStreamRecord()
 {
-    const oboe_stream_state_t state = getState();
-    bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
+    const aaudio_stream_state_t state = getState();
+    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
     ALOGE_IF(bad, "stream not closed, in state %d", state);
 }
 
-oboe_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
+aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
 {
-    oboe_result_t result = OBOE_OK;
+    aaudio_result_t result = AAUDIO_OK;
 
     result = AudioStream::open(builder);
-    if (result != OBOE_OK) {
+    if (result != AAUDIO_OK) {
         return result;
     }
 
     // Try to create an AudioRecord
 
     // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
-    int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
+    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
 
-    AudioRecord::callback_t callback = NULL;
+    AudioRecord::callback_t callback = nullptr;
     audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
 
     // TODO implement an unspecified Android format then use that.
-    audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
+    audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
             ? AUDIO_FORMAT_PCM_FLOAT
-            : OboeConvert_oboeToAndroidDataFormat(getFormat());
+            : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
     mAudioRecord = new AudioRecord(
             AUDIO_SOURCE_DEFAULT,
@@ -75,14 +75,14 @@
 
             0,    //    size_t frameCount = 0,
             callback,
-            NULL, //    void* user = NULL,
+            nullptr, //    void* user = nullptr,
             0,    //    uint32_t notificationFrames = 0,
             AUDIO_SESSION_ALLOCATE,
             AudioRecord::TRANSFER_DEFAULT,
             flags
              //   int uid = -1,
              //   pid_t pid = -1,
-             //   const audio_attributes_t* pAttributes = NULL
+             //   const audio_attributes_t* pAttributes = nullptr
              );
 
     // Did we get a valid track?
@@ -90,84 +90,84 @@
     if (status != OK) {
         close();
         ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
-        return OboeConvert_androidToOboeError(status);
+        return AAudioConvert_androidToAAudioResult(status);
     }
 
     // Get the actual rate.
     setSampleRate(mAudioRecord->getSampleRate());
     setSamplesPerFrame(mAudioRecord->channelCount());
-    setFormat(OboeConvert_androidToOboeDataFormat(mAudioRecord->format()));
+    setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
 
-    setState(OBOE_STREAM_STATE_OPEN);
+    setState(AAUDIO_STREAM_STATE_OPEN);
 
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamRecord::close()
+aaudio_result_t AudioStreamRecord::close()
 {
     // TODO add close() or release() to AudioRecord API then call it from here
-    if (getState() != OBOE_STREAM_STATE_CLOSED) {
+    if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
         mAudioRecord.clear();
-        setState(OBOE_STREAM_STATE_CLOSED);
+        setState(AAUDIO_STREAM_STATE_CLOSED);
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamRecord::requestStart()
+aaudio_result_t AudioStreamRecord::requestStart()
 {
-    if (mAudioRecord.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
+    if (mAudioRecord.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
     }
     // Get current position so we can detect when the track is playing.
     status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
     if (err != OK) {
-        return OboeConvert_androidToOboeError(err);
+        return AAudioConvert_androidToAAudioResult(err);
     }
     err = mAudioRecord->start();
     if (err != OK) {
-        return OboeConvert_androidToOboeError(err);
+        return AAudioConvert_androidToAAudioResult(err);
     } else {
-        setState(OBOE_STREAM_STATE_STARTING);
+        setState(AAUDIO_STREAM_STATE_STARTING);
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamRecord::requestPause()
+aaudio_result_t AudioStreamRecord::requestPause()
 {
-    return OBOE_ERROR_UNIMPLEMENTED;
+    return AAUDIO_ERROR_UNIMPLEMENTED;
 }
 
-oboe_result_t AudioStreamRecord::requestFlush() {
-    return OBOE_ERROR_UNIMPLEMENTED;
+aaudio_result_t AudioStreamRecord::requestFlush() {
+    return AAUDIO_ERROR_UNIMPLEMENTED;
 }
 
-oboe_result_t AudioStreamRecord::requestStop() {
-    if (mAudioRecord.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
+aaudio_result_t AudioStreamRecord::requestStop() {
+    if (mAudioRecord.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
     }
-    setState(OBOE_STREAM_STATE_STOPPING);
+    setState(AAUDIO_STREAM_STATE_STOPPING);
     mAudioRecord->stop();
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamRecord::updateState()
+aaudio_result_t AudioStreamRecord::updateState()
 {
-    oboe_result_t result = OBOE_OK;
-    oboe_wrapping_frames_t position;
+    aaudio_result_t result = AAUDIO_OK;
+    aaudio_wrapping_frames_t position;
     status_t err;
     switch (getState()) {
     // TODO add better state visibility to AudioRecord
-    case OBOE_STREAM_STATE_STARTING:
+    case AAUDIO_STREAM_STATE_STARTING:
         err = mAudioRecord->getPosition(&position);
         if (err != OK) {
-            result = OboeConvert_androidToOboeError(err);
+            result = AAudioConvert_androidToAAudioResult(err);
         } else if (position != mPositionWhenStarting) {
-            setState(OBOE_STREAM_STATE_STARTED);
+            setState(AAUDIO_STREAM_STATE_STARTED);
         }
         break;
-    case OBOE_STREAM_STATE_STOPPING:
+    case AAUDIO_STREAM_STATE_STOPPING:
         if (mAudioRecord->stopped()) {
-            setState(OBOE_STREAM_STATE_STOPPED);
+            setState(AAUDIO_STREAM_STATE_STOPPED);
         }
         break;
     default:
@@ -176,14 +176,14 @@
     return result;
 }
 
-oboe_result_t AudioStreamRecord::read(void *buffer,
-                                      oboe_size_frames_t numFrames,
-                                      oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStreamRecord::read(void *buffer,
+                                      aaudio_size_frames_t numFrames,
+                                      aaudio_nanoseconds_t timeoutNanoseconds)
 {
-    oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
-    oboe_size_bytes_t numBytes;
-    oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
-    if (result != OBOE_OK) {
+    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
+    aaudio_size_bytes_t numBytes;
+    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+    if (result != AAUDIO_OK) {
         return result;
     }
 
@@ -193,35 +193,35 @@
     if (bytesRead == WOULD_BLOCK) {
         return 0;
     } else if (bytesRead < 0) {
-        return OboeConvert_androidToOboeError(bytesRead);
+        return AAudioConvert_androidToAAudioResult(bytesRead);
     }
-    oboe_size_frames_t framesRead = (oboe_size_frames_t)(bytesRead / bytesPerFrame);
-    return (oboe_result_t) framesRead;
+    aaudio_size_frames_t framesRead = (aaudio_size_frames_t)(bytesRead / bytesPerFrame);
+    return (aaudio_result_t) framesRead;
 }
 
-oboe_result_t AudioStreamRecord::setBufferSize(oboe_size_frames_t requestedFrames,
-                                             oboe_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(aaudio_size_frames_t requestedFrames,
+                                             aaudio_size_frames_t *actualFrames)
 {
     *actualFrames = getBufferCapacity();
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_size_frames_t AudioStreamRecord::getBufferSize() const
+aaudio_size_frames_t AudioStreamRecord::getBufferSize() const
 {
     return getBufferCapacity(); // TODO implement in AudioRecord?
 }
 
-oboe_size_frames_t AudioStreamRecord::getBufferCapacity() const
+aaudio_size_frames_t AudioStreamRecord::getBufferCapacity() const
 {
-    return static_cast<oboe_size_frames_t>(mAudioRecord->frameCount());
+    return static_cast<aaudio_size_frames_t>(mAudioRecord->frameCount());
 }
 
 int32_t AudioStreamRecord::getXRunCount() const
 {
-    return OBOE_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
+    return AAUDIO_ERROR_UNIMPLEMENTED; // TODO implement when AudioRecord supports it
 }
 
-oboe_size_frames_t AudioStreamRecord::getFramesPerBurst() const
+aaudio_size_frames_t AudioStreamRecord::getFramesPerBurst() const
 {
     return 192; // TODO add query to AudioRecord.cpp
 }
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.h b/media/liboboe/src/legacy/AudioStreamRecord.h
index 02ff220..a2ac9f3 100644
--- a/media/liboboe/src/legacy/AudioStreamRecord.h
+++ b/media/liboboe/src/legacy/AudioStreamRecord.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef LEGACY_AUDIOSTREAMRECORD_H
-#define LEGACY_AUDIOSTREAMRECORD_H
+#ifndef LEGACY_AUDIO_STREAM_RECORD_H
+#define LEGACY_AUDIO_STREAM_RECORD_H
 
 #include <media/AudioRecord.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
-#include "OboeLegacy.h"
+#include "AAudioLegacy.h"
 
-namespace oboe {
+namespace aaudio {
 
 /**
  * Internal stream that uses the legacy AudioTrack path.
@@ -35,44 +35,44 @@
 
     virtual ~AudioStreamRecord();
 
-    virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
-    virtual oboe_result_t close() override;
+    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    virtual aaudio_result_t close() override;
 
-    virtual oboe_result_t requestStart() override;
-    virtual oboe_result_t requestPause() override;
-    virtual oboe_result_t requestFlush() override;
-    virtual oboe_result_t requestStop() override;
+    virtual aaudio_result_t requestStart() override;
+    virtual aaudio_result_t requestPause() override;
+    virtual aaudio_result_t requestFlush() override;
+    virtual aaudio_result_t requestStop() override;
 
-    virtual oboe_result_t getTimestamp(clockid_t clockId,
-                                       oboe_position_frames_t *framePosition,
-                                       oboe_nanoseconds_t *timeNanoseconds) override {
-        return OBOE_ERROR_UNIMPLEMENTED; // TODO
+    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+                                       aaudio_position_frames_t *framePosition,
+                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+        return AAUDIO_ERROR_UNIMPLEMENTED; // TODO
     }
 
-    virtual oboe_result_t read(void *buffer,
-                             oboe_size_frames_t numFrames,
-                             oboe_nanoseconds_t timeoutNanoseconds) override;
+    virtual aaudio_result_t read(void *buffer,
+                             aaudio_size_frames_t numFrames,
+                             aaudio_nanoseconds_t timeoutNanoseconds) override;
 
-    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
-                                             oboe_size_frames_t *actualFrames) override;
+    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+                                             aaudio_size_frames_t *actualFrames) override;
 
-    virtual oboe_size_frames_t getBufferSize() const override;
+    virtual aaudio_size_frames_t getBufferSize() const override;
 
-    virtual oboe_size_frames_t getBufferCapacity() const override;
+    virtual aaudio_size_frames_t getBufferCapacity() const override;
 
     virtual int32_t getXRunCount() const override;
 
-    virtual oboe_size_frames_t getFramesPerBurst() const override;
+    virtual aaudio_size_frames_t getFramesPerBurst() const override;
 
-    virtual oboe_result_t updateState() override;
+    virtual aaudio_result_t updateState() override;
 
 private:
     android::sp<android::AudioRecord> mAudioRecord;
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    oboe_wrapping_frames_t   mPositionWhenStarting = 0;
+    aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
     android::String16        mOpPackageName;
 };
 
-} /* namespace oboe */
+} /* namespace aaudio */
 
-#endif /* LEGACY_AUDIOSTREAMRECORD_H */
+#endif /* LEGACY_AUDIO_STREAM_RECORD_H */
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.cpp b/media/liboboe/src/legacy/AudioStreamTrack.cpp
index 5205fc5..a60b5b4 100644
--- a/media/liboboe/src/legacy/AudioStreamTrack.cpp
+++ b/media/liboboe/src/legacy/AudioStreamTrack.cpp
@@ -21,13 +21,13 @@
 #include <stdint.h>
 #include <media/AudioTrack.h>
 
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 #include "AudioClock.h"
 #include "AudioStreamTrack.h"
 
 
 using namespace android;
-using namespace oboe;
+using namespace aaudio;
 
 /*
  * Create a stream that uses the AudioTrack.
@@ -39,14 +39,14 @@
 
 AudioStreamTrack::~AudioStreamTrack()
 {
-    const oboe_stream_state_t state = getState();
-    bool bad = !(state == OBOE_STREAM_STATE_UNINITIALIZED || state == OBOE_STREAM_STATE_CLOSED);
+    const aaudio_stream_state_t state = getState();
+    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
     ALOGE_IF(bad, "stream not closed, in state %d", state);
 }
 
-oboe_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
+aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
 {
-    oboe_result_t result = OBOE_OK;
+    aaudio_result_t result = AAUDIO_OK;
 
     result = AudioStream::open(builder);
     if (result != OK) {
@@ -55,20 +55,20 @@
 
     // Try to create an AudioTrack
     // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
-    int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
+    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
-    ALOGE("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
+    ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
             samplesPerFrame, channelMask);
 
-    AudioTrack::callback_t callback = NULL;
+    AudioTrack::callback_t callback = nullptr;
     // TODO add more performance options
     audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
     size_t frameCount = 0;
     // TODO implement an unspecified AudioTrack format then use that.
-    audio_format_t format = (getFormat() == OBOE_UNSPECIFIED)
+    audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
             ? AUDIO_FORMAT_PCM_FLOAT
-            : OboeConvert_oboeToAndroidDataFormat(getFormat());
+            : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
     mAudioTrack = new AudioTrack(
             (audio_stream_type_t) AUDIO_STREAM_MUSIC,
@@ -78,156 +78,155 @@
             frameCount,
             flags,
             callback,
-            NULL,    // user callback data
-            0,       // notificationFrames
+            nullptr,    // user callback data
+            0,          // notificationFrames
             AUDIO_SESSION_ALLOCATE,
             AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
             );
 
     // Did we get a valid track?
     status_t status = mAudioTrack->initCheck();
-    // FIXME - this should work - if (status != NO_ERROR) {
-    //         But initCheck() is returning 1 !
-    if (status < 0) {
+    ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
+    if (status != NO_ERROR) {
         close();
         ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
-        return OboeConvert_androidToOboeError(status);
+        return AAudioConvert_androidToAAudioResult(status);
     }
 
     // Get the actual values from the AudioTrack.
     setSamplesPerFrame(mAudioTrack->channelCount());
     setSampleRate(mAudioTrack->getSampleRate());
-    setFormat(OboeConvert_androidToOboeDataFormat(mAudioTrack->format()));
+    setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format()));
 
-    setState(OBOE_STREAM_STATE_OPEN);
+    setState(AAUDIO_STREAM_STATE_OPEN);
 
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::close()
+aaudio_result_t AudioStreamTrack::close()
 {
     // TODO maybe add close() or release() to AudioTrack API then call it from here
-    if (getState() != OBOE_STREAM_STATE_CLOSED) {
+    if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
         mAudioTrack.clear(); // TODO is this right?
-        setState(OBOE_STREAM_STATE_CLOSED);
+        setState(AAUDIO_STREAM_STATE_CLOSED);
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::requestStart()
+aaudio_result_t AudioStreamTrack::requestStart()
 {
-    if (mAudioTrack.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
+    if (mAudioTrack.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
     }
     // Get current position so we can detect when the track is playing.
     status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
     if (err != OK) {
-        return OboeConvert_androidToOboeError(err);
+        return AAudioConvert_androidToAAudioResult(err);
     }
     err = mAudioTrack->start();
     if (err != OK) {
-        return OboeConvert_androidToOboeError(err);
+        return AAudioConvert_androidToAAudioResult(err);
     } else {
-        setState(OBOE_STREAM_STATE_STARTING);
+        setState(AAUDIO_STREAM_STATE_STARTING);
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::requestPause()
+aaudio_result_t AudioStreamTrack::requestPause()
 {
-    if (mAudioTrack.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
-    } else if (getState() != OBOE_STREAM_STATE_STARTING
-            && getState() != OBOE_STREAM_STATE_STARTED) {
-        ALOGE("requestPause(), called when state is %s", Oboe_convertStreamStateToText(getState()));
-        return OBOE_ERROR_INVALID_STATE;
+    if (mAudioTrack.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    } else if (getState() != AAUDIO_STREAM_STATE_STARTING
+            && getState() != AAUDIO_STREAM_STATE_STARTED) {
+        ALOGE("requestPause(), called when state is %s", AAudio_convertStreamStateToText(getState()));
+        return AAUDIO_ERROR_INVALID_STATE;
     }
-    setState(OBOE_STREAM_STATE_PAUSING);
+    setState(AAUDIO_STREAM_STATE_PAUSING);
     mAudioTrack->pause();
     status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
     if (err != OK) {
-        return OboeConvert_androidToOboeError(err);
+        return AAudioConvert_androidToAAudioResult(err);
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::requestFlush() {
-    if (mAudioTrack.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
-    } else if (getState() != OBOE_STREAM_STATE_PAUSED) {
-        return OBOE_ERROR_INVALID_STATE;
+aaudio_result_t AudioStreamTrack::requestFlush() {
+    if (mAudioTrack.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
+        return AAUDIO_ERROR_INVALID_STATE;
     }
-    setState(OBOE_STREAM_STATE_FLUSHING);
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
     incrementFramesRead(getFramesWritten() - getFramesRead());
     mAudioTrack->flush();
     mFramesWritten.reset32();
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::requestStop() {
-    if (mAudioTrack.get() == NULL) {
-        return OBOE_ERROR_INVALID_STATE;
+aaudio_result_t AudioStreamTrack::requestStop() {
+    if (mAudioTrack.get() == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
     }
-    setState(OBOE_STREAM_STATE_STOPPING);
+    setState(AAUDIO_STREAM_STATE_STOPPING);
     incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
     mAudioTrack->stop();
     mFramesWritten.reset32();
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::updateState()
+aaudio_result_t AudioStreamTrack::updateState()
 {
     status_t err;
-    oboe_wrapping_frames_t position;
+    aaudio_wrapping_frames_t position;
     switch (getState()) {
     // TODO add better state visibility to AudioTrack
-    case OBOE_STREAM_STATE_STARTING:
+    case AAUDIO_STREAM_STATE_STARTING:
         if (mAudioTrack->hasStarted()) {
-            setState(OBOE_STREAM_STATE_STARTED);
+            setState(AAUDIO_STREAM_STATE_STARTED);
         }
         break;
-    case OBOE_STREAM_STATE_PAUSING:
+    case AAUDIO_STREAM_STATE_PAUSING:
         if (mAudioTrack->stopped()) {
             err = mAudioTrack->getPosition(&position);
             if (err != OK) {
-                return OboeConvert_androidToOboeError(err);
+                return AAudioConvert_androidToAAudioResult(err);
             } else if (position == mPositionWhenPausing) {
                 // Has stream really stopped advancing?
-                setState(OBOE_STREAM_STATE_PAUSED);
+                setState(AAUDIO_STREAM_STATE_PAUSED);
             }
             mPositionWhenPausing = position;
         }
         break;
-    case OBOE_STREAM_STATE_FLUSHING:
+    case AAUDIO_STREAM_STATE_FLUSHING:
         {
             err = mAudioTrack->getPosition(&position);
             if (err != OK) {
-                return OboeConvert_androidToOboeError(err);
+                return AAudioConvert_androidToAAudioResult(err);
             } else if (position == 0) {
                 // Advance frames read to match written.
-                setState(OBOE_STREAM_STATE_FLUSHED);
+                setState(AAUDIO_STREAM_STATE_FLUSHED);
             }
         }
         break;
-    case OBOE_STREAM_STATE_STOPPING:
+    case AAUDIO_STREAM_STATE_STOPPING:
         if (mAudioTrack->stopped()) {
-            setState(OBOE_STREAM_STATE_STOPPED);
+            setState(AAUDIO_STREAM_STATE_STOPPED);
         }
         break;
     default:
         break;
     }
-    return OBOE_OK;
+    return AAUDIO_OK;
 }
 
-oboe_result_t AudioStreamTrack::write(const void *buffer,
-                                      oboe_size_frames_t numFrames,
-                                      oboe_nanoseconds_t timeoutNanoseconds)
+aaudio_result_t AudioStreamTrack::write(const void *buffer,
+                                      aaudio_size_frames_t numFrames,
+                                      aaudio_nanoseconds_t timeoutNanoseconds)
 {
-    oboe_size_frames_t bytesPerFrame = getBytesPerFrame();
-    oboe_size_bytes_t numBytes;
-    oboe_result_t result = OboeConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
-    if (result != OBOE_OK) {
+    aaudio_size_frames_t bytesPerFrame = getBytesPerFrame();
+    aaudio_size_bytes_t numBytes;
+    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+    if (result != AAUDIO_OK) {
         return result;
     }
 
@@ -238,33 +237,33 @@
         return 0;
     } else if (bytesWritten < 0) {
         ALOGE("invalid write, returned %d", (int)bytesWritten);
-        return OboeConvert_androidToOboeError(bytesWritten);
+        return AAudioConvert_androidToAAudioResult(bytesWritten);
     }
-    oboe_size_frames_t framesWritten = (oboe_size_frames_t)(bytesWritten / bytesPerFrame);
+    aaudio_size_frames_t framesWritten = (aaudio_size_frames_t)(bytesWritten / bytesPerFrame);
     incrementFramesWritten(framesWritten);
     return framesWritten;
 }
 
-oboe_result_t AudioStreamTrack::setBufferSize(oboe_size_frames_t requestedFrames,
-                                             oboe_size_frames_t *actualFrames)
+aaudio_result_t AudioStreamTrack::setBufferSize(aaudio_size_frames_t requestedFrames,
+                                             aaudio_size_frames_t *actualFrames)
 {
     ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
     if (result != OK) {
-        return OboeConvert_androidToOboeError(result);
+        return AAudioConvert_androidToAAudioResult(result);
     } else {
         *actualFrames = result;
-        return OBOE_OK;
+        return AAUDIO_OK;
     }
 }
 
-oboe_size_frames_t AudioStreamTrack::getBufferSize() const
+aaudio_size_frames_t AudioStreamTrack::getBufferSize() const
 {
-    return static_cast<oboe_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
+    return static_cast<aaudio_size_frames_t>(mAudioTrack->getBufferSizeInFrames());
 }
 
-oboe_size_frames_t AudioStreamTrack::getBufferCapacity() const
+aaudio_size_frames_t AudioStreamTrack::getBufferCapacity() const
 {
-    return static_cast<oboe_size_frames_t>(mAudioTrack->frameCount());
+    return static_cast<aaudio_size_frames_t>(mAudioTrack->frameCount());
 }
 
 int32_t AudioStreamTrack::getXRunCount() const
@@ -277,13 +276,13 @@
     return 192; // TODO add query to AudioTrack.cpp
 }
 
-oboe_position_frames_t AudioStreamTrack::getFramesRead() {
-    oboe_wrapping_frames_t position;
+aaudio_position_frames_t AudioStreamTrack::getFramesRead() {
+    aaudio_wrapping_frames_t position;
     status_t result;
     switch (getState()) {
-    case OBOE_STREAM_STATE_STARTING:
-    case OBOE_STREAM_STATE_STARTED:
-    case OBOE_STREAM_STATE_STOPPING:
+    case AAUDIO_STREAM_STATE_STARTING:
+    case AAUDIO_STREAM_STATE_STARTED:
+    case AAUDIO_STREAM_STATE_STOPPING:
         result = mAudioTrack->getPosition(&position);
         if (result == OK) {
             mFramesRead.update32(position);
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.h b/media/liboboe/src/legacy/AudioStreamTrack.h
index 8c40884..73d0cac 100644
--- a/media/liboboe/src/legacy/AudioStreamTrack.h
+++ b/media/liboboe/src/legacy/AudioStreamTrack.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef LEGACY_AUDIOSTREAMTRACK_H
-#define LEGACY_AUDIOSTREAMTRACK_H
+#ifndef LEGACY_AUDIO_STREAM_TRACK_H
+#define LEGACY_AUDIO_STREAM_TRACK_H
 
 #include <media/AudioTrack.h>
-#include <oboe/OboeAudio.h>
+#include <aaudio/AAudio.h>
 
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
-#include "OboeLegacy.h"
+#include "AAudioLegacy.h"
 
-namespace oboe {
+namespace aaudio {
 
 
 /**
@@ -37,42 +37,42 @@
     virtual ~AudioStreamTrack();
 
 
-    virtual oboe_result_t open(const AudioStreamBuilder & builder) override;
-    virtual oboe_result_t close() override;
+    virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
+    virtual aaudio_result_t close() override;
 
-    virtual oboe_result_t requestStart() override;
-    virtual oboe_result_t requestPause() override;
-    virtual oboe_result_t requestFlush() override;
-    virtual oboe_result_t requestStop() override;
+    virtual aaudio_result_t requestStart() override;
+    virtual aaudio_result_t requestPause() override;
+    virtual aaudio_result_t requestFlush() override;
+    virtual aaudio_result_t requestStop() override;
 
-    virtual oboe_result_t getTimestamp(clockid_t clockId,
-                                       oboe_position_frames_t *framePosition,
-                                       oboe_nanoseconds_t *timeNanoseconds) override {
-        return OBOE_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
+    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+                                       aaudio_position_frames_t *framePosition,
+                                       aaudio_nanoseconds_t *timeNanoseconds) override {
+        return AAUDIO_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
     }
 
-    virtual oboe_result_t write(const void *buffer,
-                             oboe_size_frames_t numFrames,
-                             oboe_nanoseconds_t timeoutNanoseconds) override;
+    virtual aaudio_result_t write(const void *buffer,
+                             aaudio_size_frames_t numFrames,
+                             aaudio_nanoseconds_t timeoutNanoseconds) override;
 
-    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
-                                             oboe_size_frames_t *actualFrames) override;
-    virtual oboe_size_frames_t getBufferSize() const override;
-    virtual oboe_size_frames_t getBufferCapacity() const override;
-    virtual oboe_size_frames_t getFramesPerBurst()const  override;
+    virtual aaudio_result_t setBufferSize(aaudio_size_frames_t requestedFrames,
+                                             aaudio_size_frames_t *actualFrames) override;
+    virtual aaudio_size_frames_t getBufferSize() const override;
+    virtual aaudio_size_frames_t getBufferCapacity() const override;
+    virtual aaudio_size_frames_t getFramesPerBurst() const override;
     virtual int32_t getXRunCount() const override;
 
-    virtual oboe_position_frames_t getFramesRead() override;
+    virtual aaudio_position_frames_t getFramesRead() override;
 
-    virtual oboe_result_t updateState() override;
+    virtual aaudio_result_t updateState() override;
 
 private:
     android::sp<android::AudioTrack> mAudioTrack;
     // TODO add 64-bit position reporting to AudioRecord and use it.
-    oboe_wrapping_frames_t           mPositionWhenStarting = 0;
-    oboe_wrapping_frames_t           mPositionWhenPausing = 0;
+    aaudio_wrapping_frames_t           mPositionWhenStarting = 0;
+    aaudio_wrapping_frames_t           mPositionWhenPausing = 0;
 };
 
-} /* namespace oboe */
+} /* namespace aaudio */
 
-#endif /* LEGACY_AUDIOSTREAMTRACK_H */
+#endif /* LEGACY_AUDIO_STREAM_TRACK_H */
diff --git a/media/liboboe/src/legacy/README.md b/media/liboboe/src/legacy/README.md
index b51c44b..8805915 100644
--- a/media/liboboe/src/legacy/README.md
+++ b/media/liboboe/src/legacy/README.md
@@ -1,2 +1,2 @@
-The legacy folder contains the classes that implement Oboe AudioStream on top of
+The legacy folder contains the classes that implement AAudio AudioStream on top of
 Android AudioTrack and AudioRecord.
diff --git a/media/liboboe/src/utility/AAudioUtilities.cpp b/media/liboboe/src/utility/AAudioUtilities.cpp
new file mode 100644
index 0000000..34c1ae4
--- /dev/null
+++ b/media/liboboe/src/utility/AAudioUtilities.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+
+#include "aaudio/AAudioDefinitions.h"
+#include "AAudioUtilities.h"
+
+using namespace android;
+
+aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
+    aaudio_size_bytes_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    switch (format) {
+        case AAUDIO_FORMAT_PCM_I16:
+            size = sizeof(int16_t);
+            break;
+        case AAUDIO_FORMAT_PCM_I32:
+        case AAUDIO_FORMAT_PCM_I8_24:
+            size = sizeof(int32_t);
+            break;
+        case AAUDIO_FORMAT_PCM_FLOAT:
+            size = sizeof(float);
+            break;
+        default:
+            break;
+    }
+    return size;
+}
+
+// TODO This is similar to a function in audio_utils. Consider using that instead.
+void AAudioConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination) {
+    for (int i = 0; i < numSamples; i++) {
+        float fval = source[i];
+        fval += 1.0; // to avoid discontinuity at 0.0 caused by truncation
+        fval *= 32768.0f;
+        int32_t sample = (int32_t) fval;
+        // clip to 16-bit range
+        if (sample < 0) sample = 0;
+        else if (sample > 0x0FFFF) sample = 0x0FFFF;
+        sample -= 32768; // center at zero
+        destination[i] = (int16_t) sample;
+    }
+}
+
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination) {
+    for (int i = 0; i < numSamples; i++) {
+        destination[i] = source[i] * (1.0f / 32768.0f);
+    }
+}
+
+status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
+    // This covers the case for AAUDIO_OK and for positive results.
+    if (result >= 0) {
+        return result;
+    }
+    status_t status;
+    switch (result) {
+    case AAUDIO_ERROR_DISCONNECTED:
+    case AAUDIO_ERROR_INVALID_HANDLE:
+        status = DEAD_OBJECT;
+        break;
+    case AAUDIO_ERROR_INVALID_STATE:
+        status = INVALID_OPERATION;
+        break;
+    case AAUDIO_ERROR_UNEXPECTED_VALUE: // TODO redundant?
+    case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+        status = BAD_VALUE;
+        break;
+    case AAUDIO_ERROR_WOULD_BLOCK:
+        status = WOULD_BLOCK;
+        break;
+    // TODO add more result codes
+    default:
+        status = UNKNOWN_ERROR;
+        break;
+    }
+    return status;
+}
+
+aaudio_result_t AAudioConvert_androidToAAudioResult(status_t status) {
+    // This covers the case for OK and for positive result.
+    if (status >= 0) {
+        return status;
+    }
+    aaudio_result_t result;
+    switch (status) {
+    case BAD_TYPE:
+        result = AAUDIO_ERROR_INVALID_HANDLE;
+        break;
+    case DEAD_OBJECT:
+        result = AAUDIO_ERROR_DISCONNECTED;
+        break;
+    case INVALID_OPERATION:
+        result = AAUDIO_ERROR_INVALID_STATE;
+        break;
+    case BAD_VALUE:
+        result = AAUDIO_ERROR_UNEXPECTED_VALUE;
+        break;
+    case WOULD_BLOCK:
+        result = AAUDIO_ERROR_WOULD_BLOCK;
+        break;
+    // TODO add more status codes
+    default:
+        result = AAUDIO_ERROR_INTERNAL;
+        break;
+    }
+    return result;
+}
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudioFormat) {
+    audio_format_t androidFormat;
+    switch (aaudioFormat) {
+    case AAUDIO_FORMAT_PCM_I16:
+        androidFormat = AUDIO_FORMAT_PCM_16_BIT;
+        break;
+    case AAUDIO_FORMAT_PCM_FLOAT:
+        androidFormat = AUDIO_FORMAT_PCM_FLOAT;
+        break;
+    case AAUDIO_FORMAT_PCM_I8_24:
+        androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
+        break;
+    case AAUDIO_FORMAT_PCM_I32:
+        androidFormat = AUDIO_FORMAT_PCM_32_BIT;
+        break;
+    default:
+        androidFormat = AUDIO_FORMAT_DEFAULT;
+        ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
+        break;
+    }
+    return androidFormat;
+}
+
+aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
+    aaudio_audio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
+    switch (androidFormat) {
+    case AUDIO_FORMAT_PCM_16_BIT:
+        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+        break;
+    case AUDIO_FORMAT_PCM_FLOAT:
+        aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+        break;
+    case AUDIO_FORMAT_PCM_32_BIT:
+        aaudioFormat = AAUDIO_FORMAT_PCM_I32;
+        break;
+    case AUDIO_FORMAT_PCM_8_24_BIT:
+        aaudioFormat = AAUDIO_FORMAT_PCM_I8_24;
+        break;
+    default:
+        aaudioFormat = AAUDIO_FORMAT_INVALID;
+        ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
+        break;
+    }
+    return aaudioFormat;
+}
+
+aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
+                                            aaudio_size_bytes_t bytesPerFrame,
+                                            aaudio_size_bytes_t *sizeInBytes) {
+    // TODO implement more elegantly
+    const int32_t maxChannels = 256; // ridiculously large
+    const aaudio_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
+    // Prevent overflow by limiting multiplicands.
+    if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
+        ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    *sizeInBytes = numFrames * bytesPerFrame;
+    return AAUDIO_OK;
+}
diff --git a/media/liboboe/src/utility/AAudioUtilities.h b/media/liboboe/src/utility/AAudioUtilities.h
new file mode 100644
index 0000000..38696df
--- /dev/null
+++ b/media/liboboe/src/utility/AAudioUtilities.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_AAUDIO_UTILITIES_H
+#define UTILITY_AAUDIO_UTILITIES_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <hardware/audio.h>
+
+#include "aaudio/AAudioDefinitions.h"
+
+/**
+ * Convert an AAudio result into the closest matching Android status.
+ */
+android::status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result);
+
+/**
+ * Convert an Android status into the closest matching AAudio result.
+ */
+aaudio_result_t AAudioConvert_androidToAAudioResult(android::status_t status);
+
+void AAudioConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination);
+
+void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination);
+
+/**
+ * Calculate the number of bytes and prevent numeric overflow.
+ * @param numFrames frame count
+ * @param bytesPerFrame size of a frame in bytes
+ * @param sizeInBytes total size in bytes
+ * @return AAUDIO_OK or negative error, eg. AAUDIO_ERROR_OUT_OF_RANGE
+ */
+aaudio_size_bytes_t AAudioConvert_framesToBytes(aaudio_size_frames_t numFrames,
+                                            aaudio_size_bytes_t bytesPerFrame,
+                                            aaudio_size_bytes_t *sizeInBytes);
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudio_format);
+
+aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+
+/**
+ * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ */
+aaudio_size_bytes_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
+
+#endif //UTILITY_AAUDIO_UTILITIES_H
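A hypothetical caller of the sizing helpers declared above, showing the intended overflow-safe pattern (sketch only; framesPerBurst and samplesPerFrame are assumed to be supplied by the stream):

    #include <cstdint>
    #include <new>
    #include "AAudioUtilities.h"

    static uint8_t *allocateBurstBuffer(aaudio_size_frames_t framesPerBurst, int32_t samplesPerFrame) {
        aaudio_size_bytes_t bytesPerSample = AAudioConvert_formatToSizeInBytes(AAUDIO_FORMAT_PCM_I16);
        if (bytesPerSample < 0) return nullptr;                  // unsupported format
        aaudio_size_bytes_t bytesPerFrame = samplesPerFrame * bytesPerSample;
        aaudio_size_bytes_t sizeInBytes = 0;
        if (AAudioConvert_framesToBytes(framesPerBurst, bytesPerFrame, &sizeInBytes) != AAUDIO_OK) {
            return nullptr;                                      // e.g. AAUDIO_ERROR_OUT_OF_RANGE
        }
        return new(std::nothrow) uint8_t[sizeInBytes];
    }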
diff --git a/media/liboboe/src/utility/AudioClock.h b/media/liboboe/src/utility/AudioClock.h
index da2f74a..9ac21d3 100644
--- a/media/liboboe/src/utility/AudioClock.h
+++ b/media/liboboe/src/utility/AudioClock.h
@@ -14,41 +14,41 @@
  * limitations under the License.
  */
 
-#ifndef UTILITY_AUDIOCLOCK_H
-#define UTILITY_AUDIOCLOCK_H
+#ifndef UTILITY_AUDIO_CLOCK_H
+#define UTILITY_AUDIO_CLOCK_H
 
-#include <sys/types.h>
+#include <stdint.h>
 #include <time.h>
-#include "oboe/OboeDefinitions.h"
-#include "oboe/OboeAudio.h"
+
+#include <aaudio/AAudioDefinitions.h>
 
 class AudioClock {
 public:
-    static oboe_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+    static aaudio_nanoseconds_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
         struct timespec time;
         int result = clock_gettime(clockId, &time);
         if (result < 0) {
             return -errno;
         }
-        return (time.tv_sec * OBOE_NANOS_PER_SECOND) + time.tv_nsec;
+        return (time.tv_sec * AAUDIO_NANOS_PER_SECOND) + time.tv_nsec;
     }
 
     /**
      * Sleep until the specified absolute time.
-     * Return immediately with OBOE_ERROR_ILLEGAL_ARGUMENT if a negative
+     * Return immediately with AAUDIO_ERROR_ILLEGAL_ARGUMENT if a negative
      * nanoTime is specified.
      *
      * @param nanoTime time to wake up
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepUntilNanoTime(oboe_nanoseconds_t nanoTime,
+    static int sleepUntilNanoTime(aaudio_nanoseconds_t nanoTime,
                                   clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoTime > 0) {
             struct timespec time;
-            time.tv_sec = nanoTime / OBOE_NANOS_PER_SECOND;
+            time.tv_sec = nanoTime / AAUDIO_NANOS_PER_SECOND;
             // Calculate the fractional nanoseconds. Avoids expensive % operation.
-            time.tv_nsec = nanoTime - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+            time.tv_nsec = nanoTime - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
             int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
             switch (err) {
             case EINTR:
@@ -60,7 +60,7 @@
                 return 0 - err;
             }
         } else {
-            return OBOE_ERROR_ILLEGAL_ARGUMENT;
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
         }
     }
 
@@ -72,12 +72,12 @@
      * @param clockId CLOCK_MONOTONIC is default
      * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
      */
-    static int sleepForNanos(oboe_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+    static int sleepForNanos(aaudio_nanoseconds_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
         if (nanoseconds > 0) {
             struct timespec time;
-            time.tv_sec = nanoseconds / OBOE_NANOS_PER_SECOND;
+            time.tv_sec = nanoseconds / AAUDIO_NANOS_PER_SECOND;
             // Calculate the fractional nanoseconds. Avoids expensive % operation.
-            time.tv_nsec = nanoseconds - (time.tv_sec * OBOE_NANOS_PER_SECOND);
+            time.tv_nsec = nanoseconds - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
             const int flags = 0; // documented as relative sleep
             int err = clock_nanosleep(clockId, flags, &time, nullptr);
             switch (err) {
@@ -95,4 +95,4 @@
 };
 
 
-#endif // UTILITY_AUDIOCLOCK_H
+#endif // UTILITY_AUDIO_CLOCK_H
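A hypothetical polling loop built on AudioClock (sketch only; the 10 ms period and 100 iterations are arbitrary). Advancing an absolute deadline and passing it to sleepUntilNanoTime() avoids the drift that accumulates when sleeping for a relative duration each iteration:

    #include "AudioClock.h"

    static void pollForOneSecond() {
        const aaudio_nanoseconds_t periodNanos = 10 * AAUDIO_NANOS_PER_MILLISECOND;
        aaudio_nanoseconds_t wakeTime = AudioClock::getNanoseconds();
        for (int i = 0; i < 100; i++) {
            wakeTime += periodNanos;
            int err = AudioClock::sleepUntilNanoTime(wakeTime); // 0, 1 on EINTR, or negative error
            if (err < 0) break;
            // ... service the stream here ...
        }
    }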
diff --git a/media/liboboe/src/utility/HandleTracker.cpp b/media/liboboe/src/utility/HandleTracker.cpp
index be2a64c..c4880b8 100644
--- a/media/liboboe/src/utility/HandleTracker.cpp
+++ b/media/liboboe/src/utility/HandleTracker.cpp
@@ -15,16 +15,20 @@
  *
  */
 
-#define LOG_TAG "OboeAudio"
+#define LOG_TAG "AAudio"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <stdint.h>
 #include <assert.h>
+#include <new>
+#include <stdint.h>
+#include <utils/Mutex.h>
 
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
 #include "HandleTracker.h"
 
+using android::Mutex;
+
 // Handle format is: tgggiiii
 // where each letter is 4 bits, t=type, g=generation, i=index
 
@@ -51,40 +55,45 @@
 
 HandleTracker::HandleTracker(uint32_t maxHandles)
         : mMaxHandleCount(maxHandles)
-        , mHandleAddresses(nullptr)
         , mHandleHeaders(nullptr)
 {
     assert(maxHandles <= HANDLE_TRACKER_MAX_HANDLES);
     // Allocate arrays to hold addresses and validation info.
-    mHandleAddresses = (handle_tracker_address_t *) new handle_tracker_address_t[maxHandles];
+    mHandleAddresses = (handle_tracker_address_t *)
+            new(std::nothrow) handle_tracker_address_t[maxHandles];
     if (mHandleAddresses != nullptr) {
-        mHandleHeaders = new handle_tracker_header_t[maxHandles];
+        mHandleHeaders = new(std::nothrow) handle_tracker_header_t[maxHandles];
+
         if (mHandleHeaders != nullptr) {
-            // Initialize linked list of free nodes. NULL terminated.
+            handle_tracker_header_t initialHeader = buildHeader(0, 1);
+            // Initialize linked list of free nodes. nullptr terminated.
             for (uint32_t i = 0; i < (maxHandles - 1); i++) {
                 mHandleAddresses[i] = &mHandleAddresses[i + 1]; // point to next node
-                mHandleHeaders[i] = 0;
+                mHandleHeaders[i] = initialHeader;
             }
             mNextFreeAddress = &mHandleAddresses[0];
             mHandleAddresses[maxHandles - 1] = nullptr;
             mHandleHeaders[maxHandles - 1] = 0;
         } else {
             delete[] mHandleAddresses; // so the class appears uninitialized
+            mHandleAddresses = nullptr;
         }
     }
 }
 
 HandleTracker::~HandleTracker()
 {
+    Mutex::Autolock _l(mLock);
     delete[] mHandleAddresses;
     delete[] mHandleHeaders;
+    mHandleAddresses = nullptr;
 }
 
 bool HandleTracker::isInitialized() const {
     return mHandleAddresses != nullptr;
 }
 
-handle_tracker_slot_t HandleTracker::allocateSlot() {
+handle_tracker_slot_t HandleTracker::allocateSlot_l() {
     void **allocated = mNextFreeAddress;
     if (allocated == nullptr) {
         return SLOT_UNAVAILABLE;
@@ -94,7 +103,7 @@
     return (allocated - mHandleAddresses);
 }
 
-handle_tracker_generation_t HandleTracker::nextGeneration(handle_tracker_slot_t index) {
+handle_tracker_generation_t HandleTracker::nextGeneration_l(handle_tracker_slot_t index) {
     handle_tracker_generation_t generation = (mHandleHeaders[index] + 1) & GENERATION_MASK;
     // Avoid generation zero so that 0x0 is not a valid handle.
     if (generation == GENERATION_INVALID) {
@@ -103,24 +112,26 @@
     return generation;
 }
 
-oboe_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
+aaudio_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
 {
     if (type < 0 || type >= HANDLE_TRACKER_MAX_TYPES) {
-        return static_cast<oboe_handle_t>(OBOE_ERROR_OUT_OF_RANGE);
+        return static_cast<aaudio_handle_t>(AAUDIO_ERROR_OUT_OF_RANGE);
     }
     if (!isInitialized()) {
-        return static_cast<oboe_handle_t>(OBOE_ERROR_NO_MEMORY);
+        return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_MEMORY);
     }
 
+    Mutex::Autolock _l(mLock);
+
     // Find an empty slot.
-    handle_tracker_slot_t index = allocateSlot();
+    handle_tracker_slot_t index = allocateSlot_l();
     if (index == SLOT_UNAVAILABLE) {
         ALOGE("HandleTracker::put() no room for more handles");
-        return static_cast<oboe_handle_t>(OBOE_ERROR_NO_FREE_HANDLES);
+        return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_FREE_HANDLES);
     }
 
     // Cycle the generation counter so stale handles can be detected.
-    handle_tracker_generation_t generation = nextGeneration(index); // reads header table
+    handle_tracker_generation_t generation = nextGeneration_l(index); // reads header table
     handle_tracker_header_t inputHeader = buildHeader(type, generation);
 
     // These two writes may need to be observed by other threads or cores during get().
@@ -129,48 +140,55 @@
     // TODO use store release to enforce memory order with get()
 
     // Generate a handle.
-    oboe_handle_t handle = buildHandle(inputHeader, index);
+    aaudio_handle_t handle = buildHandle(inputHeader, index);
 
-    //ALOGD("HandleTracker::put(%p) returns 0x%08x", address, handle);
+    ALOGV("HandleTracker::put(%p) returns 0x%08x", address, handle);
     return handle;
 }
 
 handle_tracker_slot_t HandleTracker::handleToIndex(handle_tracker_type_t type,
-                                                   oboe_handle_t handle) const
+                                                   aaudio_handle_t handle) const
 {
     // Validate the handle.
     handle_tracker_slot_t index = extractIndex(handle);
     if (index >= mMaxHandleCount) {
         ALOGE("HandleTracker::handleToIndex() invalid handle = 0x%08X", handle);
-        return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+        return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
     }
     handle_tracker_generation_t handleGeneration = extractGeneration(handle);
     handle_tracker_header_t inputHeader = buildHeader(type, handleGeneration);
+    // We do not need to synchronize this access to mHandleHeaders because it is constant for
+    // the lifetime of the handle.
     if (inputHeader != mHandleHeaders[index]) {
         ALOGE("HandleTracker::handleToIndex() inputHeader = 0x%08x != mHandleHeaders[%d] = 0x%08x",
              inputHeader, index, mHandleHeaders[index]);
-        return static_cast<oboe_handle_t>(OBOE_ERROR_INVALID_HANDLE);
+        return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
     }
     return index;
 }
 
-handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, oboe_handle_t handle) const
+handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, aaudio_handle_t handle) const
 {
     if (!isInitialized()) {
         return nullptr;
     }
     handle_tracker_slot_t index = handleToIndex(type, handle);
     if (index >= 0) {
+        // We do not need to synchronize this access to mHandleHeaders because this slot
+        // is allocated and, therefore, not part of the linked list of free slots.
         return mHandleAddresses[index];
     } else {
         return nullptr;
     }
 }
 
-handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, oboe_handle_t handle) {
+handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, aaudio_handle_t handle) {
     if (!isInitialized()) {
         return nullptr;
     }
+
+    Mutex::Autolock _l(mLock);
+
     handle_tracker_slot_t index = handleToIndex(type,handle);
     if (index >= 0) {
         handle_tracker_address_t address = mHandleAddresses[index];
@@ -190,9 +208,9 @@
     }
 }
 
-oboe_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
+aaudio_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
                                          handle_tracker_slot_t index) {
-    return (oboe_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
+    return (aaudio_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
 }
 
 handle_tracker_header_t HandleTracker::buildHeader(handle_tracker_type_t type,
@@ -202,12 +220,12 @@
         | (generation & GENERATION_MASK));
 }
 
-handle_tracker_slot_t HandleTracker::extractIndex(oboe_handle_t handle)
+handle_tracker_slot_t HandleTracker::extractIndex(aaudio_handle_t handle)
 {
     return handle & INDEX_MASK;
 }
 
-handle_tracker_generation_t HandleTracker::extractGeneration(oboe_handle_t handle)
+handle_tracker_generation_t HandleTracker::extractGeneration(aaudio_handle_t handle)
 {
     return (handle >> GENERATION_SHIFT) & GENERATION_MASK;
 }
diff --git a/media/liboboe/src/utility/HandleTracker.h b/media/liboboe/src/utility/HandleTracker.h
index 37dbac8..c80860c 100644
--- a/media/liboboe/src/utility/HandleTracker.h
+++ b/media/liboboe/src/utility/HandleTracker.h
@@ -14,10 +14,11 @@
  * limitations under the License.
  */
 
-#ifndef UTILITY_HANDLETRACKER_H
-#define UTILITY_HANDLETRACKER_H
+#ifndef UTILITY_HANDLE_TRACKER_H
+#define UTILITY_HANDLE_TRACKER_H
 
 #include <stdint.h>
+#include <utils/Mutex.h>
 
 typedef int32_t  handle_tracker_type_t;       // what kind of handle
 typedef int32_t  handle_tracker_slot_t;       // index in allocation table
@@ -41,7 +42,7 @@
     /**
      * @param maxHandles cannot exceed HANDLE_TRACKER_MAX_HANDLES
      */
-    HandleTracker(uint32_t maxHandles);
+    HandleTracker(uint32_t maxHandles = 256);
     virtual ~HandleTracker();
 
     /**
@@ -53,11 +54,13 @@
     /**
      * Store a pointer and return a handle that can be used to retrieve the pointer.
      *
+     * It is safe to call put() or remove() from multiple threads.
+     *
      * @param expectedType the type of the object to be tracked
      * @param address pointer to be converted to a handle
      * @return a valid handle or a negative error
      */
-    oboe_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
+    aaudio_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
 
     /**
      * Get the original pointer associated with the handle.
@@ -69,37 +72,50 @@
      * @param handle to be converted to a pointer
      * @return address associated with handle or nullptr
      */
-    handle_tracker_address_t get(handle_tracker_type_t expectedType, oboe_handle_t handle) const;
+    handle_tracker_address_t get(handle_tracker_type_t expectedType, aaudio_handle_t handle) const;
 
     /**
      * Free up the storage associated with the handle.
      * Subsequent attempts to use the handle will fail.
      *
+     * Do NOT remove() a handle while get() is being called for the same handle from another thread.
+     *
      * @param expectedType shouldmatch the type we passed to put()
      * @param handle to be removed from tracking
      * @return address associated with handle or nullptr if not found
      */
-    handle_tracker_address_t remove(handle_tracker_type_t expectedType, oboe_handle_t handle);
+    handle_tracker_address_t remove(handle_tracker_type_t expectedType, aaudio_handle_t handle);
 
 private:
     const int32_t               mMaxHandleCount;   // size of array
-    // This is const after initialization.
+    // This address is const after initialization.
     handle_tracker_address_t  * mHandleAddresses;  // address of objects or a free linked list node
-    // This is const after initialization.
+    // This address is const after initialization.
     handle_tracker_header_t   * mHandleHeaders;    // combination of type and generation
-    handle_tracker_address_t  * mNextFreeAddress; // head of the linked list of free nodes in mHandleAddresses
+    // head of the linked list of free nodes in mHandleAddresses
+    handle_tracker_address_t  * mNextFreeAddress;
+
+    // This Mutex protects the linked list of free nodes.
+    // The list is managed using mHandleAddresses and mNextFreeAddress.
+    // The data in mHandleHeaders is only changed by put() and remove().
+    android::Mutex              mLock;
 
     /**
      * Pull slot off of a list of empty slots.
      * @return index or a negative error
      */
-    handle_tracker_slot_t allocateSlot();
+    handle_tracker_slot_t allocateSlot_l();
+
+    /**
+     * Increment the generation for the slot, avoiding zero.
+     */
+    handle_tracker_generation_t nextGeneration_l(handle_tracker_slot_t index);
 
     /**
      * Validate the handle and return the corresponding index.
      * @return slot index or a negative error
      */
-    handle_tracker_slot_t handleToIndex(oboe_handle_t handle, handle_tracker_type_t type) const;
+    handle_tracker_slot_t handleToIndex(handle_tracker_type_t type, aaudio_handle_t handle) const;
 
     /**
      * Construct a handle from a header and an index.
@@ -107,7 +123,7 @@
      * @param index slot index returned from allocateSlot
      * @return handle or a negative error
      */
-    oboe_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
+    static aaudio_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
 
     /**
      * Combine a type and a generation field into a header.
@@ -120,20 +136,15 @@
      * Does not validate the handle.
      * @return index associated with a handle
      */
-    static handle_tracker_slot_t extractIndex(oboe_handle_t handle);
+    static handle_tracker_slot_t extractIndex(aaudio_handle_t handle);
 
     /**
      * Extract the generation from a handle.
      * Does not validate the handle.
      * @return generation associated with a handle
      */
-    static handle_tracker_generation_t extractGeneration(oboe_handle_t handle);
-
-    /**
-     * Increment the generation for the slot, avoiding zero.
-     */
-    handle_tracker_generation_t nextGeneration(handle_tracker_slot_t index);
+    static handle_tracker_generation_t extractGeneration(aaudio_handle_t handle);
 
 };
 
-#endif //UTILITY_HANDLETRACKER_H
+#endif //UTILITY_HANDLE_TRACKER_H
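A hypothetical round trip through the tracker (sketch only; MyObject is a placeholder payload type). After remove(), the stale handle is rejected by get(), which is the behavior exercised in test_handle_tracker.cpp:

    #include <cassert>
    #include <aaudio/AAudioDefinitions.h>
    #include "HandleTracker.h"

    struct MyObject { int value; };                      // placeholder type for illustration

    static void trackerRoundTrip() {
        const handle_tracker_type_t kTypeMyObject = 2;   // arbitrary type tag
        HandleTracker tracker;                           // default capacity of 256 handles
        MyObject obj = { 42 };

        aaudio_handle_t handle = tracker.put(kTypeMyObject, &obj);
        assert(handle > 0);                              // negative means error, e.g. no free handles

        MyObject *found = static_cast<MyObject *>(tracker.get(kTypeMyObject, handle));
        assert(found == &obj);

        tracker.remove(kTypeMyObject, handle);
        assert(tracker.get(kTypeMyObject, handle) == nullptr);  // stale handle is rejected
    }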
diff --git a/media/liboboe/src/utility/MonotonicCounter.h b/media/liboboe/src/utility/MonotonicCounter.h
index befad21..81d7f89 100644
--- a/media/liboboe/src/utility/MonotonicCounter.h
+++ b/media/liboboe/src/utility/MonotonicCounter.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef UTILITY_MONOTONICCOUNTER_H
-#define UTILITY_MONOTONICCOUNTER_H
+#ifndef UTILITY_MONOTONIC_COUNTER_H
+#define UTILITY_MONOTONIC_COUNTER_H
 
 #include <stdint.h>
 
@@ -88,4 +88,4 @@
 };
 
 
-#endif //UTILITY_MONOTONICCOUNTER_H
+#endif //UTILITY_MONOTONIC_COUNTER_H
diff --git a/media/liboboe/src/utility/OboeUtilities.cpp b/media/liboboe/src/utility/OboeUtilities.cpp
deleted file mode 100644
index b28f7c7..0000000
--- a/media/liboboe/src/utility/OboeUtilities.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "OboeAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <utils/Errors.h>
-
-#include "oboe/OboeDefinitions.h"
-#include "OboeUtilities.h"
-
-using namespace android;
-
-oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format) {
-    oboe_datatype_t dataType = OBOE_AUDIO_FORMAT_DATA_TYPE(format);
-    oboe_size_bytes_t size;
-    switch (dataType) {
-        case OBOE_AUDIO_DATATYPE_UINT8:
-            size = sizeof(uint8_t);
-            break;
-        case OBOE_AUDIO_DATATYPE_INT16:
-            size = sizeof(int16_t);
-            break;
-        case OBOE_AUDIO_DATATYPE_INT32:
-        case OBOE_AUDIO_DATATYPE_INT824:
-            size = sizeof(int32_t);
-            break;
-        case OBOE_AUDIO_DATATYPE_FLOAT32:
-            size = sizeof(float);
-            break;
-        default:
-            size = OBOE_ERROR_ILLEGAL_ARGUMENT;
-            break;
-    }
-    return size;
-}
-
-// TODO This similar to a function in audio_utils. Consider using that instead.
-void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination) {
-    for (int i = 0; i < numSamples; i++) {
-        float fval = source[i];
-        fval += 1.0; // to avoid discontinuity at 0.0 caused by truncation
-        fval *= 32768.0f;
-        int32_t sample = (int32_t) fval;
-        // clip to 16-bit range
-        if (sample < 0) sample = 0;
-        else if (sample > 0x0FFFF) sample = 0x0FFFF;
-        sample -= 32768; // center at zero
-        destination[i] = (int16_t) sample;
-    }
-}
-
-void OboeConvert_pcm16ToFloat(const float *source, int32_t numSamples, int16_t *destination) {
-    for (int i = 0; i < numSamples; i++) {
-        destination[i] = source[i] * (1.0f / 32768.0f);
-    }
-}
-
-oboe_result_t OboeConvert_androidToOboeError(status_t error) {
-    if (error >= 0) {
-        return error;
-    }
-    oboe_result_t result;
-    switch (error) {
-    case OK:
-        result = OBOE_OK;
-        break;
-    case INVALID_OPERATION:
-        result = OBOE_ERROR_INVALID_STATE;
-        break;
-    case BAD_VALUE:
-        result = OBOE_ERROR_UNEXPECTED_VALUE;
-        break;
-    case WOULD_BLOCK:
-        result = OBOE_ERROR_WOULD_BLOCK;
-        break;
-    // TODO add more error codes
-    default:
-        result = OBOE_ERROR_INTERNAL;
-        break;
-    }
-    return result;
-}
-
-audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboeFormat) {
-    audio_format_t androidFormat;
-    switch (oboeFormat) {
-    case OBOE_AUDIO_FORMAT_PCM16:
-        androidFormat = AUDIO_FORMAT_PCM_16_BIT;
-        break;
-    case OBOE_AUDIO_FORMAT_PCM_FLOAT:
-        androidFormat = AUDIO_FORMAT_PCM_FLOAT;
-        break;
-    case OBOE_AUDIO_FORMAT_PCM824:
-        androidFormat = AUDIO_FORMAT_PCM_8_24_BIT;
-        break;
-    case OBOE_AUDIO_FORMAT_PCM32:
-        androidFormat = AUDIO_FORMAT_PCM_32_BIT;
-        break;
-    default:
-        androidFormat = AUDIO_FORMAT_DEFAULT;
-        ALOGE("OboeConvert_oboeToAndroidDataFormat 0x%08X unrecognized", oboeFormat);
-        break;
-    }
-    return androidFormat;
-}
-
-oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t androidFormat) {
-    oboe_audio_format_t oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
-    switch (androidFormat) {
-    case AUDIO_FORMAT_PCM_16_BIT:
-        oboeFormat = OBOE_AUDIO_FORMAT_PCM16;
-        break;
-    case AUDIO_FORMAT_PCM_FLOAT:
-        oboeFormat = OBOE_AUDIO_FORMAT_PCM_FLOAT;
-        break;
-    case AUDIO_FORMAT_PCM_32_BIT:
-        oboeFormat = OBOE_AUDIO_FORMAT_PCM32;
-        break;
-    case AUDIO_FORMAT_PCM_8_24_BIT:
-        oboeFormat = OBOE_AUDIO_FORMAT_PCM824;
-        break;
-    default:
-        oboeFormat = OBOE_AUDIO_FORMAT_INVALID;
-        ALOGE("OboeConvert_androidToOboeDataFormat 0x%08X unrecognized", androidFormat);
-        break;
-    }
-    return oboeFormat;
-}
-
-oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
-                                            oboe_size_bytes_t bytesPerFrame,
-                                            oboe_size_bytes_t *sizeInBytes) {
-    // TODO implement more elegantly
-    const int32_t maxChannels = 256; // ridiculously large
-    const oboe_size_frames_t maxBytesPerFrame = maxChannels * sizeof(float);
-    // Prevent overflow by limiting multiplicands.
-    if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
-        ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
-        return OBOE_ERROR_OUT_OF_RANGE;
-    }
-    *sizeInBytes = numFrames * bytesPerFrame;
-    return OBOE_OK;
-}
diff --git a/media/liboboe/src/utility/OboeUtilities.h b/media/liboboe/src/utility/OboeUtilities.h
deleted file mode 100644
index 974ccf6..0000000
--- a/media/liboboe/src/utility/OboeUtilities.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef UTILITY_OBOEUTILITIES_H
-#define UTILITY_OBOEUTILITIES_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/Errors.h>
-#include <hardware/audio.h>
-
-#include "oboe/OboeDefinitions.h"
-
-oboe_result_t OboeConvert_androidToOboeError(android::status_t error);
-
-void OboeConvert_floatToPcm16(const float *source, int32_t numSamples, int16_t *destination);
-
-void OboeConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples, float *destination);
-
-/**
- * Calculate the number of bytes and prevent numeric overflow.
- * @param numFrames frame count
- * @param bytesPerFrame size of a frame in bytes
- * @param sizeInBytes total size in bytes
- * @return OBOE_OK or negative error, eg. OBOE_ERROR_OUT_OF_RANGE
- */
-oboe_size_bytes_t OboeConvert_framesToBytes(oboe_size_frames_t numFrames,
-                                            oboe_size_bytes_t bytesPerFrame,
-                                            oboe_size_bytes_t *sizeInBytes);
-
-audio_format_t OboeConvert_oboeToAndroidDataFormat(oboe_audio_format_t oboe_format);
-
-oboe_audio_format_t OboeConvert_androidToOboeDataFormat(audio_format_t format);
-
-/**
- * @return the size of a sample of the given format in bytes or OBOE_ERROR_ILLEGAL_ARGUMENT
- */
-oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format);
-
-#endif //UTILITY_OBOEUTILITIES_H
diff --git a/media/liboboe/src/utility/README.md b/media/liboboe/src/utility/README.md
index 9db926a..0ac74ea 100644
--- a/media/liboboe/src/utility/README.md
+++ b/media/liboboe/src/utility/README.md
@@ -1,3 +1,3 @@
-The utility folder contains things that may be shared between the Oboe client and server.
-They might also be handy outside Oboe.
-They generally do not depend on Oboe functionality.
+The utility folder contains things that may be shared between the AAudio client and server.
+They might also be handy outside AAudio.
+They generally do not depend on AAudio functionality.
diff --git a/media/liboboe/tests/Android.mk b/media/liboboe/tests/Android.mk
index f2c65d9..16279ec 100644
--- a/media/liboboe/tests/Android.mk
+++ b/media/liboboe/tests/Android.mk
@@ -6,12 +6,11 @@
     frameworks/av/media/liboboe/include \
     frameworks/av/media/liboboe/src/core \
     frameworks/av/media/liboboe/src/utility
-LOCAL_SRC_FILES:= test_oboe_api.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog
+LOCAL_SRC_FILES := test_aaudio_api.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := liboboe
-LOCAL_MODULE := test_oboe_api
+LOCAL_MODULE := test_aaudio_api
 include $(BUILD_NATIVE_TEST)
 
 include $(CLEAR_VARS)
@@ -21,7 +20,23 @@
     frameworks/av/media/liboboe/src/core \
     frameworks/av/media/liboboe/src/utility
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
-LOCAL_SHARED_LIBRARIES := libbinder libcutils libutils liblog
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := liboboe
 LOCAL_MODULE := test_handle_tracker
 include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include \
+    frameworks/av/media/liboboe/src \
+    frameworks/av/media/liboboe/src/core \
+    frameworks/av/media/liboboe/src/fifo \
+    frameworks/av/media/liboboe/src/utility
+LOCAL_SRC_FILES:= test_marshalling.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_MODULE := test_marshalling
+include $(BUILD_NATIVE_TEST)
diff --git a/media/liboboe/tests/test_aaudio_api.cpp b/media/liboboe/tests/test_aaudio_api.cpp
new file mode 100644
index 0000000..7db3688
--- /dev/null
+++ b/media/liboboe/tests/test_aaudio_api.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for AAudio 'C' API.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <gtest/gtest.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "AAudioUtilities.h"
+
+#define DEFAULT_STATE_TIMEOUT  (500 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Test AAudioStreamBuilder
+TEST(test_aaudio_api, aaudio_stream_builder) {
+    const aaudio_sample_rate_t requestedSampleRate1 = 48000;
+    const aaudio_sample_rate_t requestedSampleRate2 = 44100;
+    const int32_t requestedSamplesPerFrame = 2;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+
+    aaudio_sample_rate_t sampleRate = 0;
+    int32_t samplesPerFrame = 0;
+    aaudio_audio_format_t actualDataFormat;
+    AAudioStreamBuilder aaudioBuilder1;
+    AAudioStreamBuilder aaudioBuilder2;
+
+    aaudio_result_t result = AAUDIO_OK;
+
+    // Use an AAudioStreamBuilder to define the stream.
+    result = AAudio_createStreamBuilder(&aaudioBuilder1);
+    ASSERT_EQ(AAUDIO_OK, result);
+
+    // Request stream properties.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder1, requestedSampleRate1));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder1, requestedSamplesPerFrame));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setFormat(aaudioBuilder1, requestedDataFormat));
+
+    // Check to make sure builder saved the properties.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate));
+    EXPECT_EQ(requestedSampleRate1, sampleRate);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSamplesPerFrame(aaudioBuilder1, &samplesPerFrame));
+    EXPECT_EQ(requestedSamplesPerFrame, samplesPerFrame);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getFormat(aaudioBuilder1, &actualDataFormat));
+    EXPECT_EQ(requestedDataFormat, actualDataFormat);
+
+    result = AAudioStreamBuilder_getSampleRate(0x0BADCAFE, &sampleRate); // ridiculous token
+    EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, result);
+
+    // Create a second builder and make sure they do not collide.
+    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder2));
+    ASSERT_NE(aaudioBuilder1, aaudioBuilder2);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder2, requestedSampleRate2));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate));
+    EXPECT_EQ(requestedSampleRate1, sampleRate);
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+    EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+    // Delete the builder.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder1));
+
+    // Now it should no longer be valid.
+    // Note that test assumes we are using the HandleTracker. If we use plain pointers
+    // then it will be difficult to detect this kind of error.
+    result = AAudioStreamBuilder_getSampleRate(aaudioBuilder1, &sampleRate); // stale token
+    EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, result);
+
+    // Second builder should still be valid.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+    EXPECT_EQ(requestedSampleRate2, sampleRate);
+
+    // Delete the second builder.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder2));
+
+    // Now it should no longer be valid. Assumes HandlerTracker used.
+    EXPECT_EQ(AAUDIO_ERROR_INVALID_HANDLE, AAudioStreamBuilder_getSampleRate(aaudioBuilder2, &sampleRate));
+}
+
+// Test creating a default stream with everything unspecified.
+TEST(test_aaudio_api, aaudio_stream_unspecified) {
+    AAudioStreamBuilder aaudioBuilder;
+    AAudioStream aaudioStream;
+    aaudio_result_t result = AAUDIO_OK;
+
+    // Use an AAudioStreamBuilder to define the stream.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    ASSERT_EQ(AAUDIO_OK, result);
+
+    // Create an AAudioStream using the Builder.
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+    // Cleanup
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+// Test Writing to an AAudioStream
+void runtest_aaudio_stream(aaudio_sharing_mode_t requestedSharingMode) {
+    const aaudio_sample_rate_t requestedSampleRate = 48000;
+    const int32_t requestedSamplesPerFrame = 2;
+    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+
+    aaudio_sample_rate_t actualSampleRate = -1;
+    int32_t actualSamplesPerFrame = -1;
+    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_INVALID;
+    aaudio_sharing_mode_t actualSharingMode;
+    aaudio_size_frames_t framesPerBurst = -1;
+    int writeLoops = 0;
+
+    aaudio_size_frames_t framesWritten = 0;
+    aaudio_size_frames_t framesPrimed = 0;
+    aaudio_position_frames_t framesTotal = 0;
+    aaudio_position_frames_t aaudioFramesRead = 0;
+    aaudio_position_frames_t aaudioFramesRead1 = 0;
+    aaudio_position_frames_t aaudioFramesRead2 = 0;
+    aaudio_position_frames_t aaudioFramesWritten = 0;
+
+    aaudio_nanoseconds_t timeoutNanos;
+
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    AAudioStreamBuilder aaudioBuilder;
+    AAudioStream aaudioStream;
+
+    aaudio_result_t result = AAUDIO_OK;
+
+    // Use an AAudioStreamBuilder to define the stream.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    ASSERT_EQ(AAUDIO_OK, result);
+
+    // Request stream properties.
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode));
+
+    // Create an AAudioStream using the Builder.
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getState(aaudioStream, &state));
+    EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+    // Check to see what kind of stream we actually got.
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getSampleRate(aaudioStream, &actualSampleRate));
+    ASSERT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000);  // TODO what is range?
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getSamplesPerFrame(aaudioStream, &actualSamplesPerFrame));
+    ASSERT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getSharingMode(aaudioStream, &actualSharingMode));
+    ASSERT_TRUE(actualSharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE
+                || actualSharingMode == AAUDIO_SHARING_MODE_LEGACY);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getFormat(aaudioStream, &actualDataFormat));
+    EXPECT_NE(AAUDIO_FORMAT_INVALID, actualDataFormat);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesPerBurst(aaudioStream, &framesPerBurst));
+    ASSERT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
+
+    // Allocate a buffer for the audio data.
+    // TODO handle possibility of other data formats
+    ASSERT_TRUE(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+    size_t dataSizeSamples = framesPerBurst * actualSamplesPerFrame;
+    int16_t *data = new int16_t[dataSizeSamples];
+    ASSERT_TRUE(nullptr != data);
+    memset(data, 0, sizeof(int16_t) * dataSizeSamples);
+
+    // Prime the buffer.
+    timeoutNanos = 0;
+    do {
+        framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+        // There should be some room for priming the buffer.
+        framesTotal += framesWritten;
+        ASSERT_GE(framesWritten, 0);
+        ASSERT_LE(framesWritten, framesPerBurst);
+    } while (framesWritten > 0);
+    ASSERT_TRUE(framesTotal > 0);
+
+    // Start/write/pause more than once to see if it fails after the first time.
+    // Write some data and measure the rate to see if the timing is OK.
+    for (int numLoops = 0; numLoops < 2; numLoops++) {
+        // Start and wait for server to respond.
+        ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+        ASSERT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                         AAUDIO_STREAM_STATE_STARTING,
+                                                         &state,
+                                                         DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+        // Write some data while we are running. Read counter should be advancing.
+        writeLoops = 1 * actualSampleRate / framesPerBurst; // 1 second
+        ASSERT_LT(2, writeLoops); // detect absurdly high framesPerBurst
+        timeoutNanos = 10 * AAUDIO_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
+        framesWritten = 1;
+        ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+        aaudioFramesRead1 = aaudioFramesRead;
+        aaudio_nanoseconds_t beginTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+        do {
+            framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+            ASSERT_GE(framesWritten, 0);
+            ASSERT_LE(framesWritten, framesPerBurst);
+
+            framesTotal += framesWritten;
+            EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesWritten(aaudioStream, &aaudioFramesWritten));
+            EXPECT_EQ(framesTotal, aaudioFramesWritten);
+
+            // Try to get a more accurate measure of the sample rate.
+            if (beginTime == 0) {
+                EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+                if (aaudioFramesRead > aaudioFramesRead1) { // is read pointer advancing
+                    beginTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+                    aaudioFramesRead1 = aaudioFramesRead;
+                }
+            }
+        } while (framesWritten > 0 && writeLoops-- > 0);
+
+        EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead2));
+        aaudio_nanoseconds_t endTime = AAudio_getNanoseconds(AAUDIO_CLOCK_MONOTONIC);
+        ASSERT_GT(aaudioFramesRead2, 0);
+        ASSERT_GT(aaudioFramesRead2, aaudioFramesRead1);
+        ASSERT_LE(aaudioFramesRead2, aaudioFramesWritten);
+
+        // TODO why is legacy so inaccurate?
+        const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
+        if (requestedSharingMode != AAUDIO_SHARING_MODE_LEGACY) {
+            // Calculate approximate sample rate and compare with stream rate.
+            double seconds = (endTime - beginTime) / (double) AAUDIO_NANOS_PER_SECOND;
+            double measuredRate = (aaudioFramesRead2 - aaudioFramesRead1) / seconds;
+            ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
+        }
+
+        // Request async pause and wait for server to say that it has completed the pause.
+        ASSERT_EQ(AAUDIO_OK, AAudioStream_requestPause(aaudioStream));
+        EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                AAUDIO_STREAM_STATE_PAUSING,
+                                                &state,
+                                                DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(AAUDIO_STREAM_STATE_PAUSED, state);
+    }
+
+    // Make sure the read counter is not advancing when we are paused.
+    ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+    ASSERT_GE(aaudioFramesRead, aaudioFramesRead2); // monotonic increase
+
+    // Use this to sleep by waiting for something that won't happen.
+    AAudioStream_waitForStateChange(aaudioStream, AAUDIO_STREAM_STATE_PAUSED, &state, timeoutNanos);
+    ASSERT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead2));
+    EXPECT_EQ(aaudioFramesRead, aaudioFramesRead2);
+
+    // ------------------- TEST FLUSH -----------------
+    // Prime the buffer.
+    timeoutNanos = 0;
+    writeLoops = 100;
+    do {
+        framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+        framesTotal += framesWritten;
+    } while (framesWritten > 0 && writeLoops-- > 0);
+    EXPECT_EQ(0, framesWritten);
+
+    // Flush and wait for server to respond.
+    ASSERT_EQ(AAUDIO_OK, AAudioStream_requestFlush(aaudioStream));
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+                                                     AAUDIO_STREAM_STATE_FLUSHING,
+                                                     &state,
+                                                     DEFAULT_STATE_TIMEOUT));
+    EXPECT_EQ(AAUDIO_STREAM_STATE_FLUSHED, state);
+
+    // After a flush, the read counter should be caught up with the write counter.
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesWritten(aaudioStream, &aaudioFramesWritten));
+    EXPECT_EQ(framesTotal, aaudioFramesWritten);
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getFramesRead(aaudioStream, &aaudioFramesRead));
+    EXPECT_EQ(aaudioFramesRead, aaudioFramesWritten);
+
+    // The buffer should be empty after a flush so we should be able to write.
+    framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
+    // There should be some room for priming the buffer.
+    ASSERT_TRUE(framesWritten > 0 && framesWritten <= framesPerBurst);
+
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+// Test Writing to an AAudioStream using LEGACY sharing mode.
+TEST(test_aaudio_api, aaudio_stream_legacy) {
+    runtest_aaudio_stream(AAUDIO_SHARING_MODE_LEGACY);
+}
+
+// Test Writing to an AAudioStream using EXCLUSIVE sharing mode.
+TEST(test_aaudio_api, aaudio_stream_exclusive) {
+    runtest_aaudio_stream(AAUDIO_SHARING_MODE_EXCLUSIVE);
+}
+
+#define AAUDIO_THREAD_ANSWER          1826375
+#define AAUDIO_THREAD_DURATION_MSEC       500
+
+static void *TestAAudioStreamThreadProc(void *arg) {
+    AAudioStream aaudioStream = (AAudioStream) reinterpret_cast<size_t>(arg);
+    aaudio_stream_state_t state;
+
+    // Use this to sleep by waiting for something that won't happen.
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_getState(aaudioStream, &state));
+    AAudioStream_waitForStateChange(aaudioStream, AAUDIO_STREAM_STATE_PAUSED, &state,
+            AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND);
+    return reinterpret_cast<void *>(AAUDIO_THREAD_ANSWER);
+}
+
+// Test creating a stream related thread.
+TEST(test_aaudio_api, aaudio_stream_thread_basic) {
+    AAudioStreamBuilder aaudioBuilder;
+    AAudioStream aaudioStream;
+    aaudio_result_t result = AAUDIO_OK;
+    void *threadResult;
+
+    // Use an AAudioStreamBuilder to define the stream.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    ASSERT_EQ(AAUDIO_OK, result);
+
+    // Create an AAudioStream using the Builder.
+    ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+    // Start a thread.
+    ASSERT_EQ(AAUDIO_OK, AAudioStream_createThread(aaudioStream,
+            10 * AAUDIO_NANOS_PER_MILLISECOND,
+            TestAAudioStreamThreadProc,
+            reinterpret_cast<void *>(aaudioStream)));
+    // Thread already started.
+    ASSERT_NE(AAUDIO_OK, AAudioStream_createThread(aaudioStream,   // should fail!
+            10 * AAUDIO_NANOS_PER_MILLISECOND,
+            TestAAudioStreamThreadProc,
+            reinterpret_cast<void *>(aaudioStream)));
+
+    // Wait for the thread to finish.
+    ASSERT_EQ(AAUDIO_OK, AAudioStream_joinThread(aaudioStream,
+            &threadResult, 2 * AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND));
+    // The thread returns a special answer.
+    ASSERT_EQ(AAUDIO_THREAD_ANSWER, (int)reinterpret_cast<size_t>(threadResult));
+
+    // Thread should already be joined.
+    ASSERT_NE(AAUDIO_OK, AAudioStream_joinThread(aaudioStream,  // should fail!
+            &threadResult, 2 * AAUDIO_THREAD_DURATION_MSEC * AAUDIO_NANOS_PER_MILLISECOND));
+
+    // Cleanup
+    EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_delete(aaudioBuilder));
+    EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
diff --git a/media/liboboe/tests/test_handle_tracker.cpp b/media/liboboe/tests/test_handle_tracker.cpp
index ae7384e..e51c39c 100644
--- a/media/liboboe/tests/test_handle_tracker.cpp
+++ b/media/liboboe/tests/test_handle_tracker.cpp
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-// Unit tests for Oboe Handle Tracker
+// Unit tests for AAudio Handle Tracker
 
 #include <stdlib.h>
 #include <math.h>
 
 #include <gtest/gtest.h>
 
-#include <oboe/OboeDefinitions.h>
+#include <aaudio/AAudioDefinitions.h>
 #include "HandleTracker.h"
 
 // Test adding one address.
-TEST(test_handle_tracker, oboe_handle_tracker) {
+TEST(test_handle_tracker, aaudio_handle_tracker) {
     const int MAX_HANDLES = 4;
     HandleTracker tracker(MAX_HANDLES);
     handle_tracker_type_t type = 3; // arbitrary generic type
@@ -40,7 +40,7 @@
         EXPECT_EQ(nullptr, found);
 
         // create a valid handle and use it to lookup the object again
-        oboe_handle_t dataHandle = tracker.put(type, &data);
+        aaudio_handle_t dataHandle = tracker.put(type, &data);
         ASSERT_TRUE(dataHandle > 0);
         found = tracker.get(type, dataHandle);
         EXPECT_EQ(&data, found);
@@ -56,17 +56,17 @@
         EXPECT_EQ(&data, found);
         // should fail the second time
         found = tracker.remove(type, dataHandle);
-        EXPECT_EQ(NULL, found);
+        EXPECT_EQ(nullptr, found);
     }
 }
 
 // Test filling the tracker.
-TEST(test_handle_tracker, oboe_full_up) {
+TEST(test_handle_tracker, aaudio_full_up) {
     const int MAX_HANDLES = 5;
     HandleTracker tracker(MAX_HANDLES);
     handle_tracker_type_t type = 4; // arbitrary generic type
     int data[MAX_HANDLES];
-    oboe_handle_t handles[MAX_HANDLES];
+    aaudio_handle_t handles[MAX_HANDLES];
     handle_tracker_address_t found;
 
     // repeat the test several times to see if it breaks
@@ -81,7 +81,7 @@
         }
 
         // Now that it is full, try to add one more.
-        oboe_handle_t handle = tracker.put(type, &data[0]);
+        aaudio_handle_t handle = tracker.put(type, &data[0]);
         EXPECT_TRUE(handle < 0);
 
         for (int i = 0; i < MAX_HANDLES; i++) {
diff --git a/media/liboboe/tests/test_marshalling.cpp b/media/liboboe/tests/test_marshalling.cpp
new file mode 100644
index 0000000..b1f77c0
--- /dev/null
+++ b/media/liboboe/tests/test_marshalling.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for AAudio Marshalling of RingBuffer information.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <cutils/ashmem.h>
+#include <gtest/gtest.h>
+#include <sys/mman.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <binding/AudioEndpointParcelable.h>
+
+using namespace android;
+using namespace aaudio;
+
+// Test adding one value.
+TEST(test_marshalling, aaudio_one_read_write) {
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    const int arbitraryValue = 235;
+    parcel.writeInt32(arbitraryValue);
+    parcel.setDataPosition(pos);
+    int32_t y;
+    parcel.readInt32(&y);
+    EXPECT_EQ(arbitraryValue, y);
+}
+
+// Test SharedMemoryParcel.
+TEST(test_marshalling, aaudio_shared_memory) {
+    SharedMemoryParcelable sharedMemoryA;
+    SharedMemoryParcelable sharedMemoryB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemoryA.setup(fd, memSizeBytes);
+    void *region1;
+    EXPECT_EQ(AAUDIO_OK, sharedMemoryA.resolve(0, 16, &region1)); // fits in region
+    EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(-2, 16, &region1)); // offset is negative
+    EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, &region1)); // size too big
+    EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, &region1)); // goes past the end
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 98735; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedMemoryA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedMemoryB.readFromParcel(&parcel);
+    EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
+
+    // should see same value at two different addresses
+    void *region2;
+    EXPECT_EQ(AAUDIO_OK, sharedMemoryB.resolve(0, 16, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_NE(buffer1, buffer2);
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test SharedRegionParcel.
+TEST(test_marshalling, aaudio_shared_region) {
+    SharedMemoryParcelable sharedMemories[2];
+    SharedRegionParcelable sharedRegionA;
+    SharedRegionParcelable sharedRegionB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+    int32_t regionOffset1 = 32;
+    int32_t regionSize1 = 16;
+    sharedRegionA.setup(0, regionOffset1, regionSize1);
+
+    void *region1;
+    EXPECT_EQ(AAUDIO_OK, sharedRegionA.resolve(sharedMemories, &region1));
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 336677; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedRegionA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedRegionB.readFromParcel(&parcel);
+
+    // should see same value
+    void *region2;
+    EXPECT_EQ(AAUDIO_OK, sharedRegionB.resolve(sharedMemories, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test RingBufferParcelable.
+TEST(test_marshalling, aaudio_ring_buffer_parcelable) {
+    SharedMemoryParcelable sharedMemories[2];
+    RingBufferParcelable ringBufferA;
+    RingBufferParcelable ringBufferB;
+
+    const size_t bytesPerFrame = 8;
+    const size_t framesPerBurst = 32;
+    const size_t dataSizeBytes = 2048;
+    const int32_t counterSizeBytes = sizeof(int64_t);
+    const size_t memSizeBytes = dataSizeBytes + (2 * counterSizeBytes);
+
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+
+    int32_t sharedMemoryIndex = 0;
+    // arrange indices and data in the shared memory
+    int32_t readOffset = 0;
+    int32_t writeOffset = readOffset + counterSizeBytes;
+    int32_t dataOffset = writeOffset + counterSizeBytes;
+    ringBufferA.setupMemory(sharedMemoryIndex, dataOffset, dataSizeBytes,
+        readOffset, writeOffset, counterSizeBytes);
+    ringBufferA.setFramesPerBurst(framesPerBurst);
+    ringBufferA.setBytesPerFrame(bytesPerFrame);
+    ringBufferA.setCapacityInFrames(dataSizeBytes / bytesPerFrame);
+
+    // setup A
+    RingBufferDescriptor descriptorA;
+    EXPECT_EQ(AAUDIO_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
+    descriptorA.dataAddress[0] = 95;
+    descriptorA.dataAddress[1] = 57;
+    descriptorA.readCounterAddress[0] = 17;
+    descriptorA.writeCounterAddress[0] = 39;
+
+    // write A to parcel
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    ringBufferA.writeToParcel(&parcel);
+
+    // read B from parcel
+    parcel.setDataPosition(pos);
+    ringBufferB.readFromParcel(&parcel);
+
+    RingBufferDescriptor descriptorB;
+    EXPECT_EQ(AAUDIO_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
+
+    // A and B should match
+    EXPECT_EQ(descriptorA.dataAddress[0], descriptorB.dataAddress[0]);
+    EXPECT_EQ(descriptorA.dataAddress[1], descriptorB.dataAddress[1]);
+    EXPECT_EQ(descriptorA.readCounterAddress[0], descriptorB.readCounterAddress[0]);
+    EXPECT_EQ(descriptorA.writeCounterAddress[0], descriptorB.writeCounterAddress[0]);
+
+    EXPECT_EQ(ringBufferA.getFramesPerBurst(), ringBufferB.getFramesPerBurst());
+    EXPECT_EQ(ringBufferA.getBytesPerFrame(), ringBufferB.getBytesPerFrame());
+    EXPECT_EQ(ringBufferA.getCapacityInFrames(), ringBufferB.getCapacityInFrames());
+}
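The ring-buffer test above packs both 64-bit counters ahead of the data region in a single ashmem block. The layout arithmetic, copied out of the test for clarity:

    // Shared-memory layout used by aaudio_ring_buffer_parcelable:
    //   [0,  8)    read counter  (int64_t)
    //   [8,  16)   write counter (int64_t)
    //   [16, 2064) audio data    (2048 bytes)
    const size_t  dataSizeBytes    = 2048;
    const int32_t counterSizeBytes = sizeof(int64_t);                        // 8
    const size_t  memSizeBytes     = dataSizeBytes + (2 * counterSizeBytes); // 2064

    int32_t readOffset  = 0;
    int32_t writeOffset = readOffset + counterSizeBytes;                     // 8
    int32_t dataOffset  = writeOffset + counterSizeBytes;                    // 16
    // capacityInFrames = dataSizeBytes / bytesPerFrame = 2048 / 8 = 256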
diff --git a/media/liboboe/tests/test_oboe_api.cpp b/media/liboboe/tests/test_oboe_api.cpp
deleted file mode 100644
index acf3000..0000000
--- a/media/liboboe/tests/test_oboe_api.cpp
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Unit tests for Oboe 'C' API.
-
-#include <stdlib.h>
-#include <math.h>
-
-#include <gtest/gtest.h>
-
-#include <oboe/OboeDefinitions.h>
-#include <oboe/OboeAudio.h>
-#include "OboeUtilities.h"
-
-#define DEFAULT_STATE_TIMEOUT  (500 * OBOE_NANOS_PER_MILLISECOND)
-
-// Test OboeStreamBuilder
-TEST(test_oboe_api, oboe_stream_builder) {
-    const oboe_sample_rate_t requestedSampleRate1 = 48000;
-    const oboe_sample_rate_t requestedSampleRate2 = 44100;
-    const int32_t requestedSamplesPerFrame = 2;
-    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_DATATYPE_INT16;
-
-    oboe_sample_rate_t sampleRate = 0;
-    int32_t samplesPerFrame = 0;
-    oboe_audio_format_t actualDataFormat;
-    OboeStreamBuilder oboeBuilder1;
-    OboeStreamBuilder oboeBuilder2;
-
-    oboe_result_t result = OBOE_OK;
-
-    // Use an OboeStreamBuilder to define the stream.
-    result = Oboe_createStreamBuilder(&oboeBuilder1);
-    ASSERT_EQ(OBOE_OK, result);
-
-    // Request stream properties.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder1, requestedSampleRate1));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder1, requestedSamplesPerFrame));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder1, requestedDataFormat));
-
-    // Check to make sure builder saved the properties.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
-    EXPECT_EQ(requestedSampleRate1, sampleRate);
-
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSamplesPerFrame(oboeBuilder1, &samplesPerFrame));
-    EXPECT_EQ(requestedSamplesPerFrame, samplesPerFrame);
-
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getFormat(oboeBuilder1, &actualDataFormat));
-    EXPECT_EQ(requestedDataFormat, actualDataFormat);
-
-    result = OboeStreamBuilder_getSampleRate(0x0BADCAFE, &sampleRate); // ridiculous token
-    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
-
-    // Create a second builder and make sure they do not collide.
-    ASSERT_EQ(OBOE_OK, Oboe_createStreamBuilder(&oboeBuilder2));
-    ASSERT_NE(oboeBuilder1, oboeBuilder2);
-
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder2, requestedSampleRate2));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate));
-    EXPECT_EQ(requestedSampleRate1, sampleRate);
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
-    EXPECT_EQ(requestedSampleRate2, sampleRate);
-
-    // Delete the builder.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder1));
-
-    // Now it should no longer be valid.
-    // Note that test assumes we are using the HandleTracker. If we use plain pointers
-    // then it will be difficult to detect this kind of error.
-    result = OboeStreamBuilder_getSampleRate(oboeBuilder1, &sampleRate); // stale token
-    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, result);
-
-    // Second builder should still be valid.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
-    EXPECT_EQ(requestedSampleRate2, sampleRate);
-
-    // Delete the second builder.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder2));
-
-    // Now it should no longer be valid. Assumes HandlerTracker used.
-    EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
-}
-
-
-// Test creating a default stream with everything unspecified.
-TEST(test_oboe_api, oboe_stream_unspecified) {
-    OboeStreamBuilder oboeBuilder;
-    OboeStream oboeStream;
-    oboe_result_t result = OBOE_OK;
-
-    // Use an OboeStreamBuilder to define the stream.
-    result = Oboe_createStreamBuilder(&oboeBuilder);
-    ASSERT_EQ(OBOE_OK, result);
-
-    // Create an OboeStream using the Builder.
-    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
-
-    // Cleanup
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
-    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
-
-// Test Writing to an OboeStream
-TEST(test_oboe_api, oboe_stream) {
-    const oboe_sample_rate_t requestedSampleRate = 48000;
-    const oboe_sample_rate_t requestedSamplesPerFrame = 2;
-    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_DATATYPE_INT16;
-    //const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_EXCLUSIVE; // MMAP NOIRQ
-    const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_LEGACY; // AudioTrack
-
-    oboe_sample_rate_t actualSampleRate = -1;
-    int32_t actualSamplesPerFrame = -1;
-    oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_PCM824;
-    oboe_sharing_mode_t actualSharingMode;
-    oboe_size_frames_t framesPerBurst = -1;
-
-    oboe_size_frames_t framesWritten = 0;
-    oboe_size_frames_t framesPrimed = 0;
-    oboe_position_frames_t framesTotal = 0;
-    oboe_position_frames_t oboeFramesRead = 0;
-    oboe_position_frames_t oboeFramesRead1 = 0;
-    oboe_position_frames_t oboeFramesRead2 = 0;
-    oboe_position_frames_t oboeFramesWritten = 0;
-
-    oboe_nanoseconds_t timeoutNanos;
-
-    oboe_stream_state_t state = OBOE_STREAM_STATE_UNINITIALIZED;
-    OboeStreamBuilder oboeBuilder;
-    OboeStream oboeStream;
-
-    oboe_result_t result = OBOE_OK;
-
-    // Use an OboeStreamBuilder to define the stream.
-    result = Oboe_createStreamBuilder(&oboeBuilder);
-    ASSERT_EQ(OBOE_OK, result);
-
-    // Request stream properties.
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSampleRate(oboeBuilder, requestedSampleRate));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSamplesPerFrame(oboeBuilder, requestedSamplesPerFrame));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setFormat(oboeBuilder, requestedDataFormat));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_setSharingMode(oboeBuilder, requestedSharingMode));
-
-    // Create an OboeStream using the Builder.
-    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
-
-    EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
-    EXPECT_EQ(OBOE_STREAM_STATE_OPEN, state);
-
-    // Check to see what kind of stream we actually got.
-    EXPECT_EQ(OBOE_OK, OboeStream_getSampleRate(oboeStream, &actualSampleRate));
-    EXPECT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000);  // TODO what is range?
-
-    EXPECT_EQ(OBOE_OK, OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame));
-    EXPECT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
-
-    EXPECT_EQ(OBOE_OK, OboeStream_getSharingMode(oboeStream, &actualSharingMode));
-    EXPECT_TRUE(actualSharingMode == OBOE_SHARING_MODE_EXCLUSIVE
-            || actualSharingMode == OBOE_SHARING_MODE_LEGACY);
-
-    EXPECT_EQ(OBOE_OK, OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst));
-    EXPECT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
-
-    // Allocate a buffer for the audio data.
-    int16_t *data = new int16_t[framesPerBurst * actualSamplesPerFrame];
-    ASSERT_TRUE(NULL != data);
-
-    timeoutNanos = 0;
-    do {
-        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
-        // There should be some room for priming the buffer.
-        framesTotal += framesWritten;
-        ASSERT_GE(framesWritten, 0);
-        ASSERT_LE(framesWritten, framesPerBurst);
-    } while(framesWritten > 0);
-    ASSERT_TRUE(framesTotal > 0);
-
-    // Start and wait for server to respond.
-    ASSERT_EQ(OBOE_OK, OboeStream_requestStart(oboeStream));
-    ASSERT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
-                                                     OBOE_STREAM_STATE_STARTING,
-                                                     &state,
-                                                     DEFAULT_STATE_TIMEOUT));
-    EXPECT_EQ(OBOE_STREAM_STATE_STARTED, state);
-
-    // Write some data while we are running. Read counter should be advancing.
-    int loops = 1 * actualSampleRate / framesPerBurst; // 1 second
-    ASSERT_LT(2, loops); // detect absurdly high framesPerBurst
-    timeoutNanos = 10 * OBOE_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
-    framesWritten = 1;
-    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-    oboeFramesRead1 = oboeFramesRead;
-    oboe_nanoseconds_t beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-    do {
-        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
-        ASSERT_GE(framesWritten, 0);
-        ASSERT_LE(framesWritten, framesPerBurst);
-
-        framesTotal += framesWritten;
-        EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
-        EXPECT_EQ(framesTotal, oboeFramesWritten);
-
-        // Try to get a more accurate measure of the sample rate.
-        if (beginTime == 0) {
-            EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-            if (oboeFramesRead > oboeFramesRead1) { // is read pointer advancing
-                beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-                oboeFramesRead1 = oboeFramesRead;
-            }
-        }
-    } while (framesWritten > 0 && loops-- > 0);
-
-    EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
-    oboe_nanoseconds_t endTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-    ASSERT_GT(oboeFramesRead2, 0);
-    ASSERT_GT(oboeFramesRead2, oboeFramesRead1);
-    ASSERT_LE(oboeFramesRead2, oboeFramesWritten);
-
-    // TODO why is legacy so inaccurate?
-    const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
-    if (requestedSharingMode != OBOE_SHARING_MODE_LEGACY) {
-        // Calculate approximate sample rate and compare with stream rate.
-        double seconds = (endTime - beginTime) / (double) OBOE_NANOS_PER_SECOND;
-        double measuredRate = (oboeFramesRead2 - oboeFramesRead1) / seconds;
-        ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
-    }
-
-    // Request async pause and wait for server to say that it has completed the pause.
-    ASSERT_EQ(OBOE_OK, OboeStream_requestPause(oboeStream));
-    EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
-                                            OBOE_STREAM_STATE_PAUSING,
-                                            &state,
-                                            DEFAULT_STATE_TIMEOUT));
-    EXPECT_EQ(OBOE_STREAM_STATE_PAUSED, state);
-
-    // Make sure the read counter is not advancing when we are paused.
-    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-    ASSERT_GE(oboeFramesRead, oboeFramesRead2); // monotonic increase
-
-    // Use this to sleep by waiting for something that won't happen.
-    OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state, timeoutNanos);
-    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
-    EXPECT_EQ(oboeFramesRead, oboeFramesRead2);
-
-    // Fill up the buffer.
-    timeoutNanos = 0;
-    loops = 100;
-    do {
-        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
-        framesTotal += framesWritten;
-    } while (framesWritten > 0 && loops-- > 0);
-    EXPECT_EQ(0, framesWritten);
-
-    // Flush and wait for server to respond.
-    ASSERT_EQ(OBOE_OK, OboeStream_requestFlush(oboeStream));
-    EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
-                                                     OBOE_STREAM_STATE_FLUSHING,
-                                                     &state,
-                                                     DEFAULT_STATE_TIMEOUT));
-    EXPECT_EQ(OBOE_STREAM_STATE_FLUSHED, state);
-
-    // After a flush, the read counter should be caught up with the write counter.
-    EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
-    EXPECT_EQ(framesTotal, oboeFramesWritten);
-    EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-    EXPECT_EQ(oboeFramesRead, oboeFramesWritten);
-
-    // The buffer should be empty after a flush so we should be able to write.
-    framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
-    // There should be some room for priming the buffer.
-    ASSERT_TRUE(framesWritten > 0 && framesWritten <= framesPerBurst);
-
-    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
-
-#define OBOE_THREAD_ANSWER          1826375
-#define OBOE_THREAD_DURATION_MSEC       500
-
-static void *TestOboeStreamThreadProc(void *arg) {
-    OboeStream oboeStream = (OboeStream) reinterpret_cast<size_t>(arg);
-    oboe_stream_state_t state;
-
-    // Use this to sleep by waiting for something that won't happen.
-    EXPECT_EQ(OBOE_OK, OboeStream_getState(oboeStream, &state));
-    OboeStream_waitForStateChange(oboeStream, OBOE_STREAM_STATE_PAUSED, &state,
-            OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND);
-    return reinterpret_cast<void *>(OBOE_THREAD_ANSWER);
-}
-
-// Test creating a stream related thread.
-TEST(test_oboe_api, oboe_stream_thread_basic) {
-    OboeStreamBuilder oboeBuilder;
-    OboeStream oboeStream;
-    oboe_result_t result = OBOE_OK;
-    void *threadResult;
-
-    // Use an OboeStreamBuilder to define the stream.
-    result = Oboe_createStreamBuilder(&oboeBuilder);
-    ASSERT_EQ(OBOE_OK, result);
-
-    // Create an OboeStream using the Builder.
-    ASSERT_EQ(OBOE_OK, OboeStreamBuilder_openStream(oboeBuilder, &oboeStream));
-
-    // Start a thread.
-    ASSERT_EQ(OBOE_OK, OboeStream_createThread(oboeStream,
-            10 * OBOE_NANOS_PER_MILLISECOND,
-            TestOboeStreamThreadProc,
-            reinterpret_cast<void *>(oboeStream)));
-    // Thread already started.
-    ASSERT_NE(OBOE_OK, OboeStream_createThread(oboeStream,   // should fail!
-            10 * OBOE_NANOS_PER_MILLISECOND,
-            TestOboeStreamThreadProc,
-            reinterpret_cast<void *>(oboeStream)));
-
-    // Wait for the thread to finish.
-    ASSERT_EQ(OBOE_OK, OboeStream_joinThread(oboeStream,
-            &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
-    // The thread returns a special answer.
-    ASSERT_EQ(OBOE_THREAD_ANSWER, (int)reinterpret_cast<size_t>(threadResult));
-
-    // Thread should already be joined.
-    ASSERT_NE(OBOE_OK, OboeStream_joinThread(oboeStream,  // should fail!
-            &threadResult, 2 * OBOE_THREAD_DURATION_MSEC * OBOE_NANOS_PER_MILLISECOND));
-
-    // Cleanup
-    EXPECT_EQ(OBOE_OK, OboeStreamBuilder_delete(oboeBuilder));
-    EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
-}
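The deleted oboe_stream test primed the stream with non-blocking writes before starting it, and the corresponding AAudio test follows the same pattern. A sketch of that priming loop in AAudio naming, assuming AAudioStream_write and AAudioStream_requestStart are the one-for-one renames of the Oboe calls removed above (framesPerBurst and data come from the surrounding test setup):

    // Sketch of the priming loop from the removed oboe_stream test, in AAudio naming.
    int32_t framesWritten;
    int64_t framesTotal  = 0;
    int64_t timeoutNanos = 0;                  // non-blocking while priming
    do {
        framesWritten = AAudioStream_write(aaudioStream, data, framesPerBurst, timeoutNanos);
        ASSERT_GE(framesWritten, 0);
        ASSERT_LE(framesWritten, framesPerBurst);
        framesTotal += framesWritten;
    } while (framesWritten > 0);               // stop once the buffer is full
    ASSERT_GT(framesTotal, 0);

    ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));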
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f247475..3235e81 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -46,6 +46,8 @@
 #include <media/hardware/HardwareAPI.h>
 #include <media/OMXBuffer.h>
 
+#include <hidlmemory/mapping.h>
+
 #include <OMX_AudioExt.h>
 #include <OMX_VideoExt.h>
 #include <OMX_Component.h>
@@ -59,6 +61,10 @@
 #include "include/SharedMemoryBuffer.h"
 #include "omx/OMXUtils.h"
 
+#include <android/hidl/memory/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include "omx/hal/1.0/utils/WOmxNode.h"
+
 namespace android {
 
 using binder::Status;
@@ -282,7 +288,9 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-struct ACodec::DeathNotifier : public IBinder::DeathRecipient {
+struct ACodec::DeathNotifier :
+        public IBinder::DeathRecipient,
+        public ::android::hardware::hidl_death_recipient {
     explicit DeathNotifier(const sp<AMessage> &notify)
         : mNotify(notify) {
     }
@@ -291,6 +299,12 @@
         mNotify->post();
     }
 
+    virtual void serviceDied(
+            uint64_t /* cookie */,
+            const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+        mNotify->post();
+    }
+
 protected:
     virtual ~DeathNotifier() {}
 
@@ -560,6 +574,8 @@
     memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
 
     changeState(mUninitializedState);
+
+    updateTrebleFlag();
 }
 
 ACodec::~ACodec() {
@@ -811,7 +827,11 @@
 status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
-    CHECK(mDealer[portIndex] == NULL);
+    if (getTrebleFlag()) {
+        CHECK(mAllocator[portIndex] == NULL);
+    } else {
+        CHECK(mDealer[portIndex] == NULL);
+    }
     CHECK(mBuffers[portIndex].isEmpty());
 
     status_t err;
@@ -874,14 +894,26 @@
                 return NO_MEMORY;
             }
 
-            size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
             if (mode != IOMX::kPortModePresetSecureBuffer) {
-                mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+                if (getTrebleFlag()) {
+                    mAllocator[portIndex] = TAllocator::getService("ashmem");
+                    if (mAllocator[portIndex] == nullptr) {
+                        ALOGE("hidl allocator on port %d is null",
+                                (int)portIndex);
+                        return NO_MEMORY;
+                    }
+                } else {
+                    size_t totalSize = def.nBufferCountActual *
+                            (alignedSize + alignedConvSize);
+                    mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+                }
             }
 
             const sp<AMessage> &format =
                     portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
             for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
+                hidl_memory hidlMemToken;
+                sp<TMemory> hidlMem;
                 sp<IMemory> mem;
 
                 BufferInfo info;
@@ -903,30 +935,86 @@
                             : new SecureBuffer(format, native_handle, bufSize);
                     info.mCodecData = info.mData;
                 } else {
-                    mem = mDealer[portIndex]->allocate(bufSize);
-                    if (mem == NULL || mem->pointer() == NULL) {
-                        return NO_MEMORY;
-                    }
+                    if (getTrebleFlag()) {
+                        bool success;
+                        auto transStatus = mAllocator[portIndex]->allocate(
+                                bufSize,
+                                [&success, &hidlMemToken](
+                                        bool s,
+                                        hidl_memory const& m) {
+                                    success = s;
+                                    hidlMemToken = m;
+                                });
 
-                    err = mOMXNode->useBuffer(portIndex, mem, &info.mBufferID);
+                        if (!transStatus.isOk()) {
+                            ALOGE("hidl's AshmemAllocator failed at the "
+                                    "transport: %s",
+                                    transStatus.description().c_str());
+                            return NO_MEMORY;
+                        }
+                        if (!success) {
+                            return NO_MEMORY;
+                        }
+                        hidlMem = mapMemory(hidlMemToken);
+
+                        err = mOMXNode->useBuffer(
+                                portIndex, hidlMemToken, &info.mBufferID);
+                    } else {
+                        mem = mDealer[portIndex]->allocate(bufSize);
+                        if (mem == NULL || mem->pointer() == NULL) {
+                            return NO_MEMORY;
+                        }
+
+                        err = mOMXNode->useBuffer(
+                                portIndex, mem, &info.mBufferID);
+                    }
 
                     if (mode == IOMX::kPortModeDynamicANWBuffer) {
-                        ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
+                        VideoNativeMetadata* metaData = (VideoNativeMetadata*)(
+                                getTrebleFlag() ?
+                                (void*)hidlMem->getPointer() : mem->pointer());
+                        metaData->nFenceFd = -1;
                     }
 
-                    info.mCodecData = new SharedMemoryBuffer(format, mem);
-                    info.mCodecRef = mem;
+                    if (getTrebleFlag()) {
+                        info.mCodecData = new SharedMemoryBuffer(
+                                format, hidlMem);
+                        info.mCodecRef = hidlMem;
+                    } else {
+                        info.mCodecData = new SharedMemoryBuffer(
+                                format, mem);
+                        info.mCodecRef = mem;
+                    }
 
                     // if we require conversion, allocate conversion buffer for client use;
                     // otherwise, reuse codec buffer
                     if (mConverter[portIndex] != NULL) {
                         CHECK_GT(conversionBufferSize, (size_t)0);
-                        mem = mDealer[portIndex]->allocate(conversionBufferSize);
-                        if (mem == NULL|| mem->pointer() == NULL) {
-                            return NO_MEMORY;
+                        if (getTrebleFlag()) {
+                            bool success;
+                            mAllocator[portIndex]->allocate(
+                                    conversionBufferSize,
+                                    [&success, &hidlMemToken](
+                                            bool s,
+                                            hidl_memory const& m) {
+                                        success = s;
+                                        hidlMemToken = m;
+                                    });
+                            if (!success) {
+                                return NO_MEMORY;
+                            }
+                            hidlMem = mapMemory(hidlMemToken);
+                            info.mData = new SharedMemoryBuffer(format, hidlMem);
+                            info.mMemRef = hidlMem;
+                        } else {
+                            mem = mDealer[portIndex]->allocate(
+                                    conversionBufferSize);
+                            if (mem == NULL|| mem->pointer() == NULL) {
+                                return NO_MEMORY;
+                            }
+                            info.mData = new SharedMemoryBuffer(format, mem);
+                            info.mMemRef = mem;
                         }
-                        info.mData = new SharedMemoryBuffer(format, mem);
-                        info.mMemRef = mem;
                     } else {
                         info.mData = info.mCodecData;
                         info.mMemRef = info.mCodecRef;
@@ -1458,8 +1546,11 @@
         }
     }
 
-    // clear mDealer even on an error
-    mDealer[portIndex].clear();
+    if (getTrebleFlag()) {
+        mAllocator[portIndex].clear();
+    } else {
+        mDealer[portIndex].clear();
+    }
     return err;
 }
 
@@ -5543,6 +5634,7 @@
                 }
 
                 size_t size = buffer->size();
+                size_t offset = buffer->offset();
                 if (buffer->base() != info->mCodecData->base()) {
                     ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
                          mCodec->mComponentName.c_str(),
@@ -5560,7 +5652,7 @@
                     }
                     size = info->mCodecData->size();
                 } else {
-                    info->mCodecData->setRange(0, size);
+                    info->mCodecData->setRange(offset, size);
                 }
 
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -6040,8 +6132,16 @@
 
     if (mDeathNotifier != NULL) {
         if (mCodec->mOMXNode != NULL) {
-            sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
-            binder->unlinkToDeath(mDeathNotifier);
+            if (mCodec->getTrebleFlag()) {
+                auto tOmxNode =
+                        (static_cast<
+                        ::android::hardware::media::omx::V1_0::utils::
+                        LWOmxNode*>(mCodec->mOMXNode.get()))->mBase;
+                tOmxNode->unlinkToDeath(mDeathNotifier);
+            } else {
+                sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
+                binder->unlinkToDeath(mDeathNotifier);
+            }
         }
         mDeathNotifier.clear();
     }
@@ -6129,7 +6229,8 @@
     CHECK(mCodec->mOMXNode == NULL);
 
     OMXClient client;
-    if (client.connect() != OK) {
+    if ((mCodec->updateTrebleFlag() ?
+            client.connectTreble() : client.connect()) != OK) {
         mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
         return false;
     }
@@ -6201,10 +6302,18 @@
     }
 
     mDeathNotifier = new DeathNotifier(notify);
-    if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
-        // This was a local binder, if it dies so do we, we won't care
-        // about any notifications in the afterlife.
-        mDeathNotifier.clear();
+    if (mCodec->getTrebleFlag()) {
+        auto tOmxNode = (static_cast<::android::hardware::media::omx::V1_0::
+                utils::LWOmxNode*>(omxNode.get()))->mBase;
+        if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+            mDeathNotifier.clear();
+        }
+    } else {
+        if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
+            // This was a local binder, if it dies so do we, we won't care
+            // about any notifications in the afterlife.
+            mDeathNotifier.clear();
+        }
     }
 
     notify = new AMessage(kWhatOMXMessageList, mCodec);
@@ -7180,7 +7289,11 @@
                             mCodec->mBuffers[kPortIndexOutput].size());
                     err = FAILED_TRANSACTION;
                 } else {
-                    mCodec->mDealer[kPortIndexOutput].clear();
+                    if (mCodec->getTrebleFlag()) {
+                        mCodec->mAllocator[kPortIndexOutput].clear();
+                    } else {
+                        mCodec->mDealer[kPortIndexOutput].clear();
+                    }
                 }
 
                 if (err == OK) {
@@ -7563,7 +7676,8 @@
     }
 
     OMXClient client;
-    status_t err = client.connect();
+    status_t err = getTrebleFlag() ?
+            client.connectTreble() : client.connect();
     if (err != OK) {
         return err;
     }
@@ -7779,4 +7893,15 @@
     return OK;
 }
 
+bool ACodec::updateTrebleFlag() {
+    mTrebleFlag = bool(property_get_bool("debug.treble_omx", 0));
+    ALOGV("updateTrebleFlag() returns %s",
+            mTrebleFlag ? "true" : "false");
+    return mTrebleFlag;
+}
+
+bool ACodec::getTrebleFlag() const {
+    return mTrebleFlag;
+}
+
 }  // namespace android
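The Treble branch added to allocateBuffersOnPort above reduces to the following sequence for one buffer on a non-secure port (condensed from the hunks above, with error logging omitted; TAllocator and TMemory are the aliases used in ACodec):

    // Condensed Treble allocation path for one buffer (non-secure port).
    sp<TAllocator> allocator = TAllocator::getService("ashmem");
    if (allocator == nullptr) return NO_MEMORY;

    bool success = false;
    hidl_memory hidlMemToken;
    auto transStatus = allocator->allocate(
            bufSize,
            [&success, &hidlMemToken](bool s, hidl_memory const& m) {
                success = s;
                hidlMemToken = m;
            });
    if (!transStatus.isOk() || !success) return NO_MEMORY;

    sp<TMemory> hidlMem = mapMemory(hidlMemToken);        // map into this process
    err = mOMXNode->useBuffer(portIndex, hidlMemToken, &info.mBufferID);

    info.mCodecData = new SharedMemoryBuffer(format, hidlMem);
    info.mCodecRef  = hidlMem;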
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 1db7ab0..296394b 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -88,14 +88,9 @@
 }
 
 status_t ACodecBufferChannel::queueSecureInputBuffer(
-        const sp<MediaCodecBuffer> &buffer,
-        bool secure,
-        const uint8_t *key,
-        const uint8_t *iv,
-        CryptoPlugin::Mode mode,
-        CryptoPlugin::Pattern pattern,
-        const CryptoPlugin::SubSample *subSamples,
-        size_t numSubSamples,
+        const sp<MediaCodecBuffer> &buffer, bool secure, const uint8_t *key,
+        const uint8_t *iv, CryptoPlugin::Mode mode, CryptoPlugin::Pattern pattern,
+        const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
         AString *errorDetailMsg) {
     if (mCrypto == nullptr) {
         return -ENOSYS;
@@ -107,35 +102,32 @@
         return -ENOENT;
     }
 
-    void *dst_pointer = nullptr;
-    ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
-
+    ICrypto::DestinationBuffer destination;
     if (secure) {
-        sp<SecureBuffer> secureData = static_cast<SecureBuffer *>(it->mCodecBuffer.get());
-        dst_pointer = secureData->getDestinationPointer();
-        dst_type = secureData->getDestinationType();
+        sp<SecureBuffer> secureData =
+                static_cast<SecureBuffer *>(it->mCodecBuffer.get());
+        destination.mType = secureData->getDestinationType();
+        if (destination.mType != ICrypto::kDestinationTypeNativeHandle) {
+            return BAD_VALUE;
+        }
+        destination.mHandle =
+                static_cast<native_handle_t *>(secureData->getDestinationPointer());
     } else {
-        dst_pointer = it->mCodecBuffer->base();
-        dst_type = ICrypto::kDestinationTypeVmPointer;
+        destination.mType = ICrypto::kDestinationTypeSharedMemory;
+        destination.mSharedMemory = mDecryptDestination;
     }
-
-    ssize_t result = mCrypto->decrypt(
-            dst_type,
-            key,
-            iv,
-            mode,
-            pattern,
-            it->mSharedEncryptedBuffer,
-            it->mClientBuffer->offset(),
-            subSamples,
-            numSubSamples,
-            dst_pointer,
-            errorDetailMsg);
+    ssize_t result = mCrypto->decrypt(key, iv, mode, pattern,
+            it->mSharedEncryptedBuffer, it->mClientBuffer->offset(),
+            subSamples, numSubSamples, destination, errorDetailMsg);
 
     if (result < 0) {
         return result;
     }
 
+    if (destination.mType == ICrypto::kDestinationTypeSharedMemory) {
+        memcpy(it->mCodecBuffer->base(), destination.mSharedMemory->pointer(), result);
+    }
+
     it->mCodecBuffer->setRange(0, result);
 
     // Copy metadata from client to codec buffer.
@@ -228,7 +220,14 @@
                 (size_t sum, const BufferAndId& elem) {
                     return sum + align(elem.mBuffer->capacity(), alignment);
                 });
-        mDealer = new MemoryDealer(totalSize, "ACodecBufferChannel");
+        size_t maxSize = std::accumulate(
+                array.begin(), array.end(), 0u,
+                [alignment = MemoryDealer::getAllocationAlignment()]
+                (size_t max, const BufferAndId& elem) {
+                    return std::max(max, align(elem.mBuffer->capacity(), alignment));
+                });
+        mDealer = new MemoryDealer(totalSize + maxSize, "ACodecBufferChannel");
+        mDecryptDestination = mDealer->allocate(maxSize);
     }
     std::vector<const BufferInfo> inputBuffers;
     for (const BufferAndId &elem : array) {
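The queueSecureInputBuffer change above replaces the raw destination pointer with an ICrypto::DestinationBuffer and adds a copy-back step for the non-secure case. Condensed, the new flow is:

    ICrypto::DestinationBuffer destination;
    if (secure) {
        destination.mType   = ICrypto::kDestinationTypeNativeHandle;  // required for SecureBuffer
        destination.mHandle =
                static_cast<native_handle_t *>(secureData->getDestinationPointer());
    } else {
        destination.mType         = ICrypto::kDestinationTypeSharedMemory;
        destination.mSharedMemory = mDecryptDestination;   // scratch region from mDealer
    }

    ssize_t result = mCrypto->decrypt(key, iv, mode, pattern,
            it->mSharedEncryptedBuffer, it->mClientBuffer->offset(),
            subSamples, numSubSamples, destination, errorDetailMsg);

    if (result >= 0 && destination.mType == ICrypto::kDestinationTypeSharedMemory) {
        // Non-secure output lands in the shared scratch buffer; copy into the codec buffer.
        memcpy(it->mCodecBuffer->base(), destination.mSharedMemory->pointer(), result);
    }

Sizing the dealer as totalSize + maxSize in the hunk above guarantees the scratch region is at least as large as the largest input buffer, so the copy-back never overruns.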
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index f3d622b..410dbc9 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -18,7 +18,6 @@
         DataConverter.cpp                 \
         DataSource.cpp                    \
         DataURISource.cpp                 \
-        DRMExtractor.cpp                  \
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
@@ -114,6 +113,11 @@
         libstagefright_foundation \
         libdl \
         libRScpp \
+        libhidlbase \
+        libhidlmemory \
+        android.hidl.memory@1.0 \
+        android.hardware.media.omx@1.0 \
+        android.hardware.media.omx@1.0-utils \
 
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
 
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index 37a40ec..fee3739 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -34,6 +34,11 @@
       mMemory(mem) {
 }
 
+SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem)
+    : MediaCodecBuffer(format, new ABuffer(mem->getPointer(), mem->getSize())),
+      mTMemory(mem) {
+}
+
 SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
     : MediaCodecBuffer(format, new ABuffer(nullptr, size)),
       mPointer(ptr) {
@@ -51,8 +56,7 @@
 }
 
 ICrypto::DestinationType SecureBuffer::getDestinationType() {
-    return mHandle == nullptr ? ICrypto::kDestinationTypeOpaqueHandle
-                              : ICrypto::kDestinationTypeNativeHandle;
+    return ICrypto::kDestinationTypeNativeHandle;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
deleted file mode 100644
index 8ba36d5..0000000
--- a/media/libstagefright/DRMExtractor.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "include/DRMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-
-#include <drm/drm_framework_common.h>
-#include <utils/Errors.h>
-
-
-namespace android {
-
-class DRMSource : public MediaSource {
-public:
-    DRMSource(const sp<IMediaSource> &mediaSource,
-            const sp<DecryptHandle> &decryptHandle,
-            DrmManagerClient *managerClient,
-            int32_t trackId, DrmBuffer *ipmpBox);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
-    virtual ~DRMSource();
-
-private:
-    sp<IMediaSource> mOriginalMediaSource;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-    size_t mTrackId;
-    mutable Mutex mDRMLock;
-    size_t mNALLengthSize;
-    bool mWantsNALFragments;
-
-    DRMSource(const DRMSource &);
-    DRMSource &operator=(const DRMSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMSource::DRMSource(const sp<IMediaSource> &mediaSource,
-        const sp<DecryptHandle> &decryptHandle,
-        DrmManagerClient *managerClient,
-        int32_t trackId, DrmBuffer *ipmpBox)
-    : mOriginalMediaSource(mediaSource),
-      mDecryptHandle(decryptHandle),
-      mDrmManagerClient(managerClient),
-      mTrackId(trackId),
-      mNALLengthSize(0),
-      mWantsNALFragments(false) {
-    CHECK(mDrmManagerClient);
-    mDrmManagerClient->initializeDecryptUnit(
-            mDecryptHandle, trackId, ipmpBox);
-
-    const char *mime;
-    bool success = getFormat()->findCString(kKeyMIMEType, &mime);
-    CHECK(success);
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
-        CHECK(getFormat()->findData(kKeyAVCC, &type, &data, &size));
-
-        const uint8_t *ptr = (const uint8_t *)data;
-
-        CHECK(size >= 7);
-        CHECK_EQ(ptr[0], 1);  // configurationVersion == 1
-
-        // The number of bytes used to encode the length of a NAL unit.
-        mNALLengthSize = 1 + (ptr[4] & 3);
-    }
-}
-
-DRMSource::~DRMSource() {
-    Mutex::Autolock autoLock(mDRMLock);
-    mDrmManagerClient->finalizeDecryptUnit(mDecryptHandle, mTrackId);
-}
-
-status_t DRMSource::start(MetaData *params) {
-    int32_t val;
-    if (params && params->findInt32(kKeyWantsNALFragments, &val)
-        && val != 0) {
-        mWantsNALFragments = true;
-    } else {
-        mWantsNALFragments = false;
-    }
-
-   return mOriginalMediaSource->start(params);
-}
-
-status_t DRMSource::stop() {
-    return mOriginalMediaSource->stop();
-}
-
-sp<MetaData> DRMSource::getFormat() {
-    return mOriginalMediaSource->getFormat();
-}
-
-status_t DRMSource::read(MediaBuffer **buffer, const ReadOptions *options) {
-    Mutex::Autolock autoLock(mDRMLock);
-    status_t err;
-    if ((err = mOriginalMediaSource->read(buffer, options)) != OK) {
-        return err;
-    }
-
-    size_t len = (*buffer)->range_length();
-
-    char *src = (char *)(*buffer)->data() + (*buffer)->range_offset();
-
-    DrmBuffer encryptedDrmBuffer(src, len);
-    DrmBuffer decryptedDrmBuffer;
-    decryptedDrmBuffer.length = len;
-    decryptedDrmBuffer.data = new char[len];
-    DrmBuffer *pDecryptedDrmBuffer = &decryptedDrmBuffer;
-
-    if ((err = mDrmManagerClient->decrypt(mDecryptHandle, mTrackId,
-            &encryptedDrmBuffer, &pDecryptedDrmBuffer)) != NO_ERROR) {
-
-        if (decryptedDrmBuffer.data) {
-            delete [] decryptedDrmBuffer.data;
-            decryptedDrmBuffer.data = NULL;
-        }
-
-        return err;
-    }
-    CHECK(pDecryptedDrmBuffer == &decryptedDrmBuffer);
-
-    const char *mime;
-    CHECK(getFormat()->findCString(kKeyMIMEType, &mime));
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) && !mWantsNALFragments) {
-        uint8_t *dstData = (uint8_t*)src;
-        size_t srcOffset = 0;
-        size_t dstOffset = 0;
-
-        len = decryptedDrmBuffer.length;
-        while (srcOffset < len) {
-            CHECK(srcOffset + mNALLengthSize <= len);
-            size_t nalLength = 0;
-            const uint8_t* data = (const uint8_t*)(&decryptedDrmBuffer.data[srcOffset]);
-
-            switch (mNALLengthSize) {
-                case 1:
-                    nalLength = *data;
-                    break;
-                case 2:
-                    nalLength = U16_AT(data);
-                    break;
-                case 3:
-                    nalLength = ((size_t)data[0] << 16) | U16_AT(&data[1]);
-                    break;
-                case 4:
-                    nalLength = U32_AT(data);
-                    break;
-                default:
-                    CHECK(!"Should not be here.");
-                    break;
-            }
-
-            srcOffset += mNALLengthSize;
-
-            size_t end = srcOffset + nalLength;
-            if (end > len || end < srcOffset) {
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-
-                return ERROR_MALFORMED;
-            }
-
-            if (nalLength == 0) {
-                continue;
-            }
-
-            if (dstOffset > SIZE_MAX - 4 ||
-                dstOffset + 4 > SIZE_MAX - nalLength ||
-                dstOffset + 4 + nalLength > (*buffer)->size()) {
-                (*buffer)->release();
-                (*buffer) = NULL;
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-                return ERROR_MALFORMED;
-            }
-
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 1;
-            memcpy(&dstData[dstOffset], &decryptedDrmBuffer.data[srcOffset], nalLength);
-            srcOffset += nalLength;
-            dstOffset += nalLength;
-        }
-
-        CHECK_EQ(srcOffset, len);
-        (*buffer)->set_range((*buffer)->range_offset(), dstOffset);
-
-    } else {
-        memcpy(src, decryptedDrmBuffer.data, decryptedDrmBuffer.length);
-        (*buffer)->set_range((*buffer)->range_offset(), decryptedDrmBuffer.length);
-    }
-
-    if (decryptedDrmBuffer.data) {
-        delete [] decryptedDrmBuffer.data;
-        decryptedDrmBuffer.data = NULL;
-    }
-
-    return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMExtractor::DRMExtractor(const sp<DataSource> &source, const char* mime)
-    : mDataSource(source),
-      mDecryptHandle(NULL),
-      mDrmManagerClient(NULL) {
-    mOriginalExtractor = MediaExtractor::Create(source, mime);
-    mOriginalExtractor->getMetaData()->setInt32(kKeyIsDRM, 1);
-
-    source->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-}
-
-DRMExtractor::~DRMExtractor() {
-}
-
-size_t DRMExtractor::countTracks() {
-    return mOriginalExtractor->countTracks();
-}
-
-sp<IMediaSource> DRMExtractor::getTrack(size_t index) {
-    sp<IMediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
-    originalMediaSource->getFormat()->setInt32(kKeyIsDRM, 1);
-
-    int32_t trackID;
-    CHECK(getTrackMetaData(index, 0)->findInt32(kKeyTrackID, &trackID));
-
-    DrmBuffer ipmpBox;
-    ipmpBox.data = mOriginalExtractor->getDrmTrackInfo(trackID, &(ipmpBox.length));
-    CHECK(ipmpBox.length > 0);
-
-    return interface_cast<IMediaSource>(
-            new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
-            trackID, &ipmpBox));
-}
-
-sp<MetaData> DRMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
-    return mOriginalExtractor->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> DRMExtractor::getMetaData() {
-    return mOriginalExtractor->getMetaData();
-}
-
-bool SniffDRM(
-    const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *) {
-    sp<DecryptHandle> decryptHandle = source->DrmInitialization();
-
-    if (decryptHandle != NULL) {
-        if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) {
-            *mimeType = String8("drm+container_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) {
-            *mimeType = String8("drm+es_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else {
-            return false;
-        }
-
-        return true;
-    }
-
-    return false;
-}
-} //namespace android
-
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 716f5d8..d46ef3c 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -79,6 +79,8 @@
 static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
 static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
 
+static const int kTimestampDebugCount = 10;
+
 static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
     kHevcNalUnitTypeVps,
     kHevcNalUnitTypeSps,
@@ -101,7 +103,7 @@
     ~Track();
 
     status_t start(MetaData *params);
-    status_t stop();
+    status_t stop(bool stopSource = true);
     status_t pause();
     bool reachedEOS();
 
@@ -118,6 +120,7 @@
     status_t dump(int fd, const Vector<String16>& args) const;
     static const char *getFourCCForMime(const char *mime);
     const char *getTrackType() const;
+    void resetInternal();
 
 private:
     enum {
@@ -275,6 +278,7 @@
     bool mIsAudio;
     bool mIsVideo;
     bool mIsMPEG4;
+    bool mGotStartKeyFrame;
     bool mIsMalformed;
     int32_t mTrackId;
     int64_t mTrackDurationUs;
@@ -303,6 +307,9 @@
     int64_t mMinCttsOffsetTimeUs;
     int64_t mMaxCttsOffsetTimeUs;
 
+    // Save the last 10 frames' timestamp for debug.
+    std::list<std::pair<int64_t, int64_t>> mTimestampDebugHelper;
+
     // Sequence parameter set or picture parameter set
     struct AVCParamSet {
         AVCParamSet(uint16_t length, const uint8_t *data)
@@ -332,6 +339,8 @@
     // Update the audio track's drift information.
     void updateDriftTime(const sp<MetaData>& meta);
 
+    void dumpTimeStamps();
+
     int32_t getStartTimeOffsetScaledTime() const;
 
     static void *ThreadWrapper(void *me);
@@ -411,41 +420,8 @@
     Track &operator=(const Track &);
 };
 
-MPEG4Writer::MPEG4Writer(int fd)
-    : mFd(dup(fd)),
-      mInitCheck(mFd < 0? NO_INIT: OK),
-      mIsRealTimeRecording(true),
-      mUse4ByteNalLength(true),
-      mUse32BitOffset(true),
-      mIsFileSizeLimitExplicitlyRequested(false),
-      mPaused(false),
-      mStarted(false),
-      mWriterThreadStarted(false),
-      mOffset(0),
-      mMdatOffset(0),
-      mMoovBoxBuffer(NULL),
-      mMoovBoxBufferOffset(0),
-      mWriteMoovBoxToMemory(false),
-      mFreeBoxOffset(0),
-      mStreamableFile(false),
-      mEstimatedMoovBoxSize(0),
-      mMoovExtraSize(0),
-      mInterleaveDurationUs(1000000),
-      mTimeScale(-1),
-      mStartTimestampUs(-1ll),
-      mLatitudex10000(0),
-      mLongitudex10000(0),
-      mAreGeoTagsAvailable(false),
-      mStartTimeOffsetMs(-1),
-      mMetaKeys(new AMessage()) {
-    addDeviceMeta();
-
-    // Verify mFd is seekable
-    off64_t off = lseek64(mFd, 0, SEEK_SET);
-    if (off < 0) {
-        ALOGE("cannot seek mFd: %s (%d)", strerror(errno), errno);
-        release();
-    }
+MPEG4Writer::MPEG4Writer(int fd) {
+    initInternal(fd);
 }
 
 MPEG4Writer::~MPEG4Writer() {
@@ -458,6 +434,54 @@
         mTracks.erase(it);
     }
     mTracks.clear();
+
+    if (mNextFd != -1) {
+        close(mNextFd);
+    }
+}
+
+void MPEG4Writer::initInternal(int fd) {
+    ALOGV("initInternal");
+    mFd = dup(fd);
+    mNextFd = -1;
+    mInitCheck = mFd < 0? NO_INIT: OK;
+    mIsRealTimeRecording = true;
+    mUse4ByteNalLength = true;
+    mUse32BitOffset = true;
+    mIsFileSizeLimitExplicitlyRequested = false;
+    mPaused = false;
+    mStarted = false;
+    mWriterThreadStarted = false;
+    mSendNotify = false;
+    mOffset = 0;
+    mMdatOffset = 0;
+    mMoovBoxBuffer = NULL;
+    mMoovBoxBufferOffset = 0;
+    mWriteMoovBoxToMemory = false;
+    mFreeBoxOffset = 0;
+    mStreamableFile = false;
+    mEstimatedMoovBoxSize = 0;
+    mMoovExtraSize = 0;
+    mInterleaveDurationUs = 1000000;
+    mTimeScale = -1;
+    mStartTimestampUs = -1ll;
+    mLatitudex10000 = 0;
+    mLongitudex10000 = 0;
+    mAreGeoTagsAvailable = false;
+    mStartTimeOffsetMs = -1;
+    mSwitchPending = false;
+    mMetaKeys = new AMessage();
+    addDeviceMeta();
+    // Verify mFd is seekable
+    off64_t off = lseek64(mFd, 0, SEEK_SET);
+    if (off < 0) {
+        ALOGE("cannot seek mFd: %s (%d) %lld", strerror(errno), errno, (long long)mFd);
+        release();
+    }
+    for (List<Track *>::iterator it = mTracks.begin();
+         it != mTracks.end(); ++it) {
+        (*it)->resetInternal();
+    }
 }
 
 status_t MPEG4Writer::dump(
@@ -667,6 +691,7 @@
     if (mInitCheck != OK) {
         return UNKNOWN_ERROR;
     }
+    mStartMeta = param;
 
     /*
      * Check mMaxFileSizeLimitBytes at the beginning
@@ -924,7 +949,30 @@
     mMoovBoxBuffer = NULL;
 }
 
-status_t MPEG4Writer::reset() {
+void MPEG4Writer::finishCurrentSession() {
+    reset(false /* stopSource */);
+}
+
+status_t MPEG4Writer::switchFd() {
+    ALOGV("switchFd");
+    Mutex::Autolock l(mLock);
+    if (mSwitchPending) {
+        return OK;
+    }
+
+    if (mNextFd == -1) {
+        ALOGW("No FileDescripter for next recording");
+        return INVALID_OPERATION;
+    }
+
+    mSwitchPending = true;
+    sp<AMessage> msg = new AMessage(kWhatSwitch, mReflector);
+    status_t err = msg->post();
+
+    return err;
+}
+
+status_t MPEG4Writer::reset(bool stopSource) {
     if (mInitCheck != OK) {
         return OK;
     } else {
@@ -943,7 +991,7 @@
     int64_t minDurationUs = 0x7fffffffffffffffLL;
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
-        status_t status = (*it)->stop();
+        status_t status = (*it)->stop(stopSource);
         if (err == OK && status != OK) {
             err = status;
         }
@@ -1433,6 +1481,18 @@
     return OK;
 }
 
+void MPEG4Writer::notifyApproachingLimit() {
+    Mutex::Autolock autolock(mLock);
+    // Only notify once.
+    if (mSendNotify) {
+        return;
+    }
+    ALOGW("Recorded file size is approaching limit %" PRId64 "bytes",
+        mMaxFileSizeLimitBytes);
+    notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_APPROACHING, 0);
+    mSendNotify = true;
+}
+
 void MPEG4Writer::write(const void *data, size_t size) {
     write(data, 1, size);
 }
@@ -1446,7 +1506,6 @@
     if (mMaxFileSizeLimitBytes == 0) {
         return false;
     }
-
     int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
@@ -1457,12 +1516,33 @@
         // Add 1024 bytes as error tolerance
         return nTotalBytesEstimate + 1024 >= mMaxFileSizeLimitBytes;
     }
+
     // Be conservative in the estimate: do not exceed 95% of
     // the target file limit. For small target file size limit, though,
     // this will not help.
     return (nTotalBytesEstimate >= (95 * mMaxFileSizeLimitBytes) / 100);
 }
 
+bool MPEG4Writer::approachingFileSizeLimit() {
+    // No limit
+    if (mMaxFileSizeLimitBytes == 0) {
+        return false;
+    }
+
+    int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
+    for (List<Track *>::iterator it = mTracks.begin();
+         it != mTracks.end(); ++it) {
+        nTotalBytesEstimate += (*it)->getEstimatedTrackSizeBytes();
+    }
+
+    if (!mStreamableFile) {
+        // Add 1024 bytes as error tolerance
+        return nTotalBytesEstimate + 1024 >= (90 * mMaxFileSizeLimitBytes) / 100;
+    }
+
+    return (nTotalBytesEstimate >= (90 * mMaxFileSizeLimitBytes) / 100);
+}
+
 bool MPEG4Writer::exceedsFileDurationLimit() {
     // No limit
     if (mMaxFileDurationLimitUs == 0) {
@@ -1522,6 +1602,7 @@
       mPaused(false),
       mResumed(false),
       mStarted(false),
+      mGotStartKeyFrame(false),
       mIsMalformed(false),
       mTrackId(trackId),
       mTrackDurationUs(0),
@@ -1561,6 +1642,50 @@
     setTimeScale();
 }
 
+// Clear all the internal states except the CSD data.
+void MPEG4Writer::Track::resetInternal() {
+      mDone = false;
+      mPaused = false;
+      mResumed = false;
+      mStarted = false;
+      mGotStartKeyFrame = false;
+      mIsMalformed = false;
+      mTrackDurationUs = 0;
+      mEstimatedTrackSizeBytes = 0;
+      mSamplesHaveSameSize = 0;
+      if (mStszTableEntries != NULL) {
+         delete mStszTableEntries;
+         mStszTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+      }
+
+      if (mStcoTableEntries != NULL) {
+         delete mStcoTableEntries;
+         mStcoTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+      }
+      if (mCo64TableEntries != NULL) {
+         delete mCo64TableEntries;
+         mCo64TableEntries = new ListTableEntries<off64_t, 1>(1000);
+      }
+
+      if (mStscTableEntries != NULL) {
+         delete mStscTableEntries;
+         mStscTableEntries = new ListTableEntries<uint32_t, 3>(1000);
+      }
+      if (mStssTableEntries != NULL) {
+         delete mStssTableEntries;
+         mStssTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+      }
+      if (mSttsTableEntries != NULL) {
+         delete mSttsTableEntries;
+         mSttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+      }
+      if (mCttsTableEntries != NULL) {
+         delete mCttsTableEntries;
+         mCttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+      }
+      mReachedEOS = false;
+}
+
 void MPEG4Writer::Track::updateTrackSizeEstimate() {
 
     uint32_t stcoBoxCount = (mOwner->use32BitFileOffset()
@@ -1614,6 +1739,24 @@
     mCttsTableEntries->add(htonl(duration));
 }
 
+status_t MPEG4Writer::setNextFd(int fd) {
+    ALOGV("addNextFd");
+    Mutex::Autolock l(mLock);
+    if (mLooper == NULL) {
+        mReflector = new AHandlerReflector<MPEG4Writer>(this);
+        mLooper = new ALooper;
+        mLooper->registerHandler(mReflector);
+        mLooper->start();
+    }
+
+    if (mNextFd != -1) {
+        // A next fd is already pending; it must be consumed before another can be set.
+        return INVALID_OPERATION;
+    }
+    mNextFd = fd;
+    return OK;
+}
+
 void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
     if (mOwner->use32BitFileOffset()) {
         uint32_t value = offset;
@@ -1645,8 +1788,29 @@
     CHECK_GT(mTimeScale, 0);
 }
 
+void MPEG4Writer::onMessageReceived(const sp<AMessage> &msg) {
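+    // Dispatched on mLooper's thread. Only kWhatSwitch is handled: finish the
+    // current file and restart recording on the fd queued by setNextFd().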
+    switch (msg->what()) {
+        case kWhatSwitch:
+        {
+            finishCurrentSession();
+            mLock.lock();
+            int fd = mNextFd;
+            mNextFd = -1;
+            mLock.unlock();
+            initInternal(fd);
+            start(mStartMeta.get());
+            mSwitchPending = false;
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED, 0);
+            break;
+        }
+        default:
+        TRESPASS();
+    }
+}
+
 void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
     const char *mime;
+
     CHECK(mMeta->findCString(kKeyMIMEType, &mime));
 
     uint32_t type;
@@ -1949,8 +2113,8 @@
     return OK;
 }
 
-status_t MPEG4Writer::Track::stop() {
-    ALOGD("%s track stopping", getTrackType());
+status_t MPEG4Writer::Track::stop(bool stopSource) {
+    ALOGD("%s track stopping. %s source", getTrackType(), stopSource ? "Stop" : "Not Stop");
     if (!mStarted) {
         ALOGE("Stop() called but track is not started");
         return ERROR_END_OF_STREAM;
@@ -1960,16 +2124,17 @@
         return OK;
     }
     mDone = true;
-
-    ALOGD("%s track source stopping", getTrackType());
-    mSource->stop();
-    ALOGD("%s track source stopped", getTrackType());
+    if (stopSource) {
+        ALOGD("%s track source stopping", getTrackType());
+        mSource->stop();
+        ALOGD("%s track source stopped", getTrackType());
+    }
 
     void *dummy;
     pthread_join(mThread, &dummy);
     status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
 
-    ALOGD("%s track stopped", getTrackType());
+    ALOGD("%s track stopped. %s source", getTrackType(), stopSource ? "Stop" : "Not Stop");
     return err;
 }
 
@@ -2343,6 +2508,17 @@
     }
 }
 
+void MPEG4Writer::Track::dumpTimeStamps() {
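+    // Called when a track is detected to be malformed; logs the last
+    // kTimestampDebugCount timestamp pairs kept in mTimestampDebugHelper.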
+    ALOGE("Dumping %s track's last 10 frames timestamp ", getTrackType());
+    std::string timeStampString;
+    for (std::list<std::pair<int64_t, int64_t>>::iterator num = mTimestampDebugHelper.begin();
+            num != mTimestampDebugHelper.end(); ++num) {
+        timeStampString += "(" + std::to_string(num->first)+
+                "us, " + std::to_string(num->second) + "us) ";
+    }
+    ALOGE("%s", timeStampString.c_str());
+}
+
 status_t MPEG4Writer::Track::threadEntry() {
     int32_t count = 0;
     const int64_t interleaveDurationUs = mOwner->interleaveDuration();
@@ -2412,19 +2588,17 @@
                 ALOGI("ignoring additional CSD for video track after first frame");
             } else {
                 mMeta = mSource->getFormat(); // get output format after format change
-
+                status_t err;
                 if (mIsAvc) {
-                    status_t err = makeAVCCodecSpecificData(
+                    err = makeAVCCodecSpecificData(
                             (const uint8_t *)buffer->data()
                                 + buffer->range_offset(),
                             buffer->range_length());
-                    CHECK_EQ((status_t)OK, err);
                 } else if (mIsHevc) {
-                    status_t err = makeHEVCCodecSpecificData(
+                    err = makeHEVCCodecSpecificData(
                             (const uint8_t *)buffer->data()
                                 + buffer->range_offset(),
                             buffer->range_length());
-                    CHECK_EQ((status_t)OK, err);
                 } else if (mIsMPEG4) {
                     copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
                             buffer->range_length());
@@ -2433,6 +2607,12 @@
 
             buffer->release();
             buffer = NULL;
+            if (OK != err) {
+                mSource->stop();
+                mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_ERROR,
+                       mTrackId | MEDIA_RECORDER_TRACK_ERROR_GENERAL, err);
+                break;
+            }
 
             mGotAllCodecSpecificData = true;
             continue;
@@ -2476,13 +2656,20 @@
         updateTrackSizeEstimate();
 
         if (mOwner->exceedsFileSizeLimit()) {
-            ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
-                    mOwner->mMaxFileSizeLimitBytes);
-            mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+            if (mOwner->switchFd() != OK) {
+                ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
+                        mOwner->mMaxFileSizeLimitBytes);
+                mSource->stop();
+                mOwner->notify(
+                        MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+            } else {
+                ALOGV("%s Current recorded file size exceeds limit %" PRId64 "bytes. Switching output",
+                        getTrackType(), mOwner->mMaxFileSizeLimitBytes);
+            }
             copy->release();
-            mSource->stop();
             break;
         }
+
         if (mOwner->exceedsFileDurationLimit()) {
             ALOGW("Recorded file duration exceeds limit %" PRId64 "microseconds",
                     mOwner->mMaxFileDurationLimitUs);
@@ -2492,11 +2679,23 @@
             break;
         }
 
+        if (mOwner->approachingFileSizeLimit()) {
+            mOwner->notifyApproachingLimit();
+        }
 
         int32_t isSync = false;
         meta_data->findInt32(kKeyIsSyncFrame, &isSync);
         CHECK(meta_data->findInt64(kKeyTime, &timestampUs));
 
+        // For video, skip the first several non-key frames until getting the first key frame.
+        if (mIsVideo && !mGotStartKeyFrame && !isSync) {
+            ALOGD("Video skip non-key frame");
+            copy->release();
+            continue;
+        }
+        if (mIsVideo && isSync) {
+            mGotStartKeyFrame = true;
+        }
 ////////////////////////////////////////////////////////////////////////////////
         if (mStszTableEntries->count() == 0) {
             mFirstSampleTimeRealUs = systemTime() / 1000;
@@ -2525,8 +2724,9 @@
             previousPausedDurationUs += pausedDurationUs - lastDurationUs;
             mResumed = false;
         }
-
+        std::pair<int64_t, int64_t> timestampPair;
         timestampUs -= previousPausedDurationUs;
+        timestampPair.first = timestampUs;
         if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
             copy->release();
             mSource->stop();
@@ -2548,9 +2748,11 @@
             if (mLastDecodingTimeUs < 0) {
                 decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
             } else {
-                // increase decoding time by at least 1 tick
-                decodingTimeUs = std::max(
-                        mLastDecodingTimeUs + divUp(1000000, mTimeScale), decodingTimeUs);
+                // increase decoding time by at least the larger value of 1 tick and
+                // 0.1 milliseconds. This needs to take into account the possible
+                // delta adjustment of the duration ticks below.
+                decodingTimeUs = std::max(mLastDecodingTimeUs +
+                        std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
             }
 
             mLastDecodingTimeUs = decodingTimeUs;
@@ -2683,6 +2885,12 @@
         lastDurationUs = timestampUs - lastTimestampUs;
         lastDurationTicks = currDurationTicks;
         lastTimestampUs = timestampUs;
+        timestampPair.second = timestampUs;
+        // Insert the timestamp pair into mTimestampDebugHelper.
+        if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+            mTimestampDebugHelper.pop_front();
+        }
+        mTimestampDebugHelper.push_back(timestampPair);
 
         if (isSync != 0) {
             addOneStssTableEntry(mStszTableEntries->count());
@@ -2738,6 +2946,7 @@
     }
 
     if (isTrackMalFormed()) {
+        dumpTimeStamps();
         err = ERROR_MALFORMED;
     }
 
@@ -3260,8 +3469,14 @@
         mOwner->writeInt32(0);
     } else {
         int32_t width, height;
-        bool success = mMeta->findInt32(kKeyWidth, &width);
-        success = success && mMeta->findInt32(kKeyHeight, &height);
+        bool success = mMeta->findInt32(kKeyDisplayWidth, &width);
+        success = success && mMeta->findInt32(kKeyDisplayHeight, &height);
+
+        // Use width/height if display width/height are not present.
+        if (!success) {
+            success = mMeta->findInt32(kKeyWidth, &width);
+            success = success && mMeta->findInt32(kKeyHeight, &height);
+        }
         CHECK(success);
 
         mOwner->writeInt32(width << 16);   // 32-bit fixed-point value
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 9eca982..a332cce 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -33,6 +33,7 @@
 #include <media/IOMX.h>
 #include <media/IResourceManagerService.h>
 #include <media/MediaCodecBuffer.h>
+#include <media/MediaAnalyticsItem.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -57,6 +58,16 @@
 
 namespace android {
 
+// key for media statistics
+static const char *kCodecKeyName = "codec";
+// attrs for media statistics
+static const char *kCodecCodec = "codec";               /* e.g. OMX.google.aac.decoder */
+static const char *kCodecMime = "mime";                 /* e.g. audio/mime */
+static const char *kCodecMode = "mode";                 /* audio, video */
+static const char *kCodecSecure = "secure";             /* 0, 1 */
+
+
+
 static int64_t getId(const sp<IResourceManagerClient> &client) {
     return (int64_t) client.get();
 }
@@ -476,11 +487,27 @@
     } else {
         mUid = uid;
     }
+    // set up our new record, get a sessionID, put it into the in-progress list
+    mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
+    if (mAnalyticsItem != NULL) {
+        (void) mAnalyticsItem->generateSessionID();
+        // don't record it yet; only at the end, when we have decided that we have
+        // data worth writing (e.g. .count() > 0)
+    }
 }
 
 MediaCodec::~MediaCodec() {
     CHECK_EQ(mState, UNINITIALIZED);
     mResourceManagerService->removeResource(getId(mResourceManagerClient));
+
+    if (mAnalyticsItem != NULL ) {
+        if (mAnalyticsItem->count() > 0) {
+            mAnalyticsItem->setFinalized(true);
+            mAnalyticsItem->selfrecord();
+        }
+        delete mAnalyticsItem;
+        mAnalyticsItem = NULL;
+    }
 }
 
 // static
@@ -600,6 +627,19 @@
         msg->setInt32("encoder", encoder);
     }
 
+    if (mAnalyticsItem != NULL) {
+        if (nameIsType) {
+            // name is the mime type
+            mAnalyticsItem->setCString(kCodecMime, name.c_str());
+        } else {
+            mAnalyticsItem->setCString(kCodecCodec, name.c_str());
+        }
+        mAnalyticsItem->setCString(kCodecMode, mIsVideo ? "video" : "audio");
+        //mAnalyticsItem->setInt32("type", nameIsType);
+        if (nameIsType)
+            mAnalyticsItem->setInt32("encoder", encoder);
+    }
+
     status_t err;
     Vector<MediaResource> resources;
     MediaResource::Type type =
@@ -652,6 +692,12 @@
             mRotationDegrees = 0;
         }
 
+        if (mAnalyticsItem != NULL) {
+            mAnalyticsItem->setInt32("width", mVideoWidth);
+            mAnalyticsItem->setInt32("height", mVideoHeight);
+            mAnalyticsItem->setInt32("rotation", mRotationDegrees);
+        }
+
         // Prevent possible integer overflow in downstream code.
         if (mInitIsEncoder
                 && (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
@@ -666,6 +712,10 @@
 
     if (crypto != NULL) {
         msg->setPointer("crypto", crypto.get());
+        if (mAnalyticsItem != NULL) {
+            // XXX: save indication that it's crypto in some way...
+            mAnalyticsItem->setInt32("crypto", 1);
+        }
     }
 
     // save msg for reset
@@ -1058,6 +1108,21 @@
     return OK;
 }
 
+status_t MediaCodec::getMetrics(Parcel *reply) {
+
+    // shouldn't happen, but be safe
+    if (mAnalyticsItem == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    // XXX: go get current values for whatever in-flight data we want
+
+    // send it back to the caller.
+    mAnalyticsItem->writeToParcel(reply);
+
+    return OK;
+}
+
 status_t MediaCodec::getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexInput);
@@ -1403,6 +1468,10 @@
 
                     CHECK(msg->findString("componentName", &mComponentName));
 
+                    if (mComponentName.c_str()) {
+                        mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
+                    }
+
                     if (mComponentName.startsWith("OMX.google.")) {
                         mFlags |= kFlagUsesSoftwareRenderer;
                     } else {
@@ -1413,9 +1482,11 @@
                     if (mComponentName.endsWith(".secure")) {
                         mFlags |= kFlagIsSecure;
                         resourceType = MediaResource::kSecureCodec;
+                        mAnalyticsItem->setInt32(kCodecSecure, 1);
                     } else {
                         mFlags &= ~kFlagIsSecure;
                         resourceType = MediaResource::kNonSecureCodec;
+                        mAnalyticsItem->setInt32(kCodecSecure, 0);
                     }
 
                     if (mIsVideo) {
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 49f480d..677d43e 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -27,7 +27,6 @@
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
 #include "include/MPEG2TSExtractor.h"
-#include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
 #include "include/AACExtractor.h"
 #include "include/MidiExtractor.h"
@@ -37,6 +36,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryDealer.h>
 
+#include <media/MediaAnalyticsItem.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
@@ -48,19 +48,46 @@
 #include <utils/String8.h>
 #include <private/android_filesystem_config.h>
 
+// still doing some on/off toggling here.
+#define MEDIA_LOG       1
+
 
 namespace android {
 
-MediaExtractor::MediaExtractor():
-    mIsDrm(false) {
+// key for media statistics
+static const char *KeyName_Extractor = "extractor";
+// attrs for media statistics
+
+MediaExtractor::MediaExtractor() {
     if (!LOG_NDEBUG) {
         uid_t uid = getuid();
         struct passwd *pw = getpwuid(uid);
         ALOGI("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
     }
 
+    mAnalyticsItem = NULL;
+    if (MEDIA_LOG) {
+        mAnalyticsItem = new MediaAnalyticsItem(KeyName_Extractor);
+        (void) mAnalyticsItem->generateSessionID();
+    }
 }
 
+MediaExtractor::~MediaExtractor() {
+
+    // log the current record, provided it has some information worth recording
+    if (MEDIA_LOG) {
+        if (mAnalyticsItem != NULL) {
+            if (mAnalyticsItem->count() > 0) {
+                mAnalyticsItem->setFinalized(true);
+                mAnalyticsItem->selfrecord();
+            }
+        }
+    }
+    if (mAnalyticsItem != NULL) {
+        delete mAnalyticsItem;
+        mAnalyticsItem = NULL;
+    }
+}
 
 sp<MetaData> MediaExtractor::getMetaData() {
     return new MetaData;
@@ -148,23 +175,6 @@
         ALOGW("creating media extractor in calling process");
         return CreateFromService(source, mime);
     } else {
-        String8 mime8;
-        float confidence;
-        sp<AMessage> meta;
-
-        // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
-        // process, not the extractor process.
-        if (SniffDRM(source, &mime8, &confidence, &meta)) {
-            const char *drmMime = mime8.string();
-            ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
-            if (!strncmp(drmMime, "drm+es_based+", 13)) {
-                // DRMExtractor sets container metadata kKeyIsDRM to 1
-                return new DRMExtractor(source, drmMime + 14);
-            } else {
-                mime = drmMime + 20; // get real mimetype after "drm+container_based+" prefix
-            }
-        }
-
         // remote extractor
         ALOGV("get service manager");
         sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
@@ -187,6 +197,9 @@
     ALOGV("MediaExtractor::CreateFromService %s", mime);
     RegisterDefaultSniffers();
 
+    // initialize source decryption if needed
+    source->DrmInitialization();
+
     sp<AMessage> meta;
 
     String8 tmp;
@@ -230,6 +243,31 @@
         ret = new MidiExtractor(source);
     }
 
+    if (ret != NULL) {
+       // track the container format (mpeg, aac, wvm, etc)
+       if (MEDIA_LOG) {
+          if (ret->mAnalyticsItem != NULL) {
+              ret->mAnalyticsItem->setCString("fmt",  ret->name());
+              // tracks (size_t)
+              ret->mAnalyticsItem->setInt32("ntrk",  ret->countTracks());
+              // metadata
+              sp<MetaData> pMetaData = ret->getMetaData();
+              if (pMetaData != NULL) {
+                String8 xx = pMetaData->toString();
+                ALOGD("metadata says: %s", xx.string());
+                // can grab various fields like:
+                // 'titl' -- but this verges into PII
+                // 'mime'
+                const char *mime = NULL;
+                if (pMetaData->findCString(kKeyMIMEType, &mime)) {
+                    ret->mAnalyticsItem->setCString("mime",  mime);
+                }
+                // what else is interesting here?
+              }
+          }
+       }
+    }
+
     return ret;
 }
 
@@ -299,9 +337,6 @@
     RegisterSniffer_l(SniffMPEG2PS);
     RegisterSniffer_l(SniffMidi);
 
-    if (property_get_bool("drm.service.enabled", false)) {
-        RegisterSniffer_l(SniffDRM);
-    }
     gSniffersRegistered = true;
 }
 
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index b13877d..c7b8888 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -41,7 +41,7 @@
 MediaMuxer::MediaMuxer(int fd, OutputFormat format)
     : mFormat(format),
       mState(UNINITIALIZED) {
-    if (format == OUTPUT_FORMAT_MPEG_4) {
+    if (format == OUTPUT_FORMAT_MPEG_4 || format == OUTPUT_FORMAT_THREE_GPP) {
         mWriter = new MPEG4Writer(fd);
     } else if (format == OUTPUT_FORMAT_WEBM) {
         mWriter = new WebmWriter(fd);
@@ -108,8 +108,8 @@
         ALOGE("setLocation() must be called before start().");
         return INVALID_OPERATION;
     }
-    if (mFormat != OUTPUT_FORMAT_MPEG_4) {
-        ALOGE("setLocation() is only supported for .mp4 output.");
+    if (mFormat != OUTPUT_FORMAT_MPEG_4 && mFormat != OUTPUT_FORMAT_THREE_GPP) {
+        ALOGE("setLocation() is only supported for .mp4 pr .3gp output.");
         return INVALID_OPERATION;
     }
 
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index a29aff0..b4e694c 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -29,6 +29,8 @@
 
 #include "include/OMX.h"
 
+#include "omx/hal/1.0/utils/WOmx.h"
+
 namespace android {
 
 OMXClient::OMXClient() {
@@ -53,6 +55,21 @@
     return OK;
 }
 
+status_t OMXClient::connectTreble() {
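+    // Obtain the default Treble (HIDL) IOmx service and wrap it in LWOmx so
+    // callers can keep using the existing IOMX interface.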
+    using namespace ::android::hardware::media::omx::V1_0;
+    sp<IOmx> tOmx = IOmx::getService("default");
+    if (tOmx.get() == nullptr) {
+        ALOGE("Cannot obtain Treble IOmx.");
+        return NO_INIT;
+    }
+    if (!tOmx->isRemote()) {
+        ALOGE("Treble IOmx is in passthrough mode.");
+        return NO_INIT;
+    }
+    mOMX = new utils::LWOmx(tOmx);
+    return OK;
+}
+
 void OMXClient::disconnect() {
     mOMX.clear();
 }
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 8061bc6..de5ea9c 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -522,8 +522,6 @@
         return ERROR_MALFORMED;
     }
 
-    mSyncSampleOffset = data_offset;
-
     uint8_t header[8];
     if (mDataSource->readAt(
                 data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
@@ -535,13 +533,13 @@
         return ERROR_MALFORMED;
     }
 
-    mNumSyncSamples = U32_AT(&header[4]);
+    uint32_t numSyncSamples = U32_AT(&header[4]);
 
-    if (mNumSyncSamples < 2) {
+    if (numSyncSamples < 2) {
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
-    uint64_t allocSize = (uint64_t)mNumSyncSamples * sizeof(uint32_t);
+    uint64_t allocSize = (uint64_t)numSyncSamples * sizeof(uint32_t);
     if (allocSize > kMaxTotalSize) {
         ALOGE("Sync sample table size too large.");
         return ERROR_OUT_OF_RANGE;
@@ -559,19 +557,21 @@
         return ERROR_OUT_OF_RANGE;
     }
 
-    mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
+    mSyncSamples = new (std::nothrow) uint32_t[numSyncSamples];
     if (!mSyncSamples) {
         ALOGE("Cannot allocate sync sample table with %llu entries.",
-                (unsigned long long)mNumSyncSamples);
+                (unsigned long long)numSyncSamples);
         return ERROR_OUT_OF_RANGE;
     }
 
-    if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples,
+    if (mDataSource->readAt(data_offset + 8, mSyncSamples,
             (size_t)allocSize) != (ssize_t)allocSize) {
+        delete[] mSyncSamples;
+        mSyncSamples = NULL;
         return ERROR_IO;
     }
 
-    for (size_t i = 0; i < mNumSyncSamples; ++i) {
+    for (size_t i = 0; i < numSyncSamples; ++i) {
         if (mSyncSamples[i] == 0) {
             ALOGE("b/32423862, unexpected zero value in stss");
             continue;
@@ -579,6 +579,9 @@
         mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1;
     }
 
+    mSyncSampleOffset = data_offset;
+    mNumSyncSamples = numSyncSamples;
+
     return OK;
 }
 
@@ -989,4 +992,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index f2638ed..ec02fb9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1770,6 +1770,45 @@
     *sync = settings;
 }
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering) {
+    msg->setInt32("init-mode", buffering.mInitialBufferingMode);
+    msg->setInt32("rebuffer-mode", buffering.mRebufferingMode);
+    msg->setInt32("init-ms", buffering.mInitialWatermarkMs);
+    msg->setInt32("init-kb", buffering.mInitialWatermarkKB);
+    msg->setInt32("rebuffer-low-ms", buffering.mRebufferingWatermarkLowMs);
+    msg->setInt32("rebuffer-high-ms", buffering.mRebufferingWatermarkHighMs);
+    msg->setInt32("rebuffer-low-kb", buffering.mRebufferingWatermarkLowKB);
+    msg->setInt32("rebuffer-high-kb", buffering.mRebufferingWatermarkHighKB);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */) {
+    int32_t value;
+    if (msg->findInt32("init-mode", &value)) {
+        buffering->mInitialBufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("rebuffer-mode", &value)) {
+        buffering->mRebufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("init-ms", &value)) {
+        buffering->mInitialWatermarkMs = value;
+    }
+    if (msg->findInt32("init-kb", &value)) {
+        buffering->mInitialWatermarkKB = value;
+    }
+    if (msg->findInt32("rebuffer-low-ms", &value)) {
+        buffering->mRebufferingWatermarkLowMs = value;
+    }
+    if (msg->findInt32("rebuffer-high-ms", &value)) {
+        buffering->mRebufferingWatermarkHighMs = value;
+    }
+    if (msg->findInt32("rebuffer-low-kb", &value)) {
+        buffering->mRebufferingWatermarkLowKB = value;
+    }
+    if (msg->findInt32("rebuffer-high-kb", &value)) {
+        buffering->mRebufferingWatermarkHighKB = value;
+    }
+}
+
 AString nameForFd(int fd) {
     const size_t SIZE = 256;
     char buffer[SIZE];
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 9f15cf7..6e7ef35 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -420,8 +420,8 @@
     meta->setInt32(kKeyWidth, width);
     meta->setInt32(kKeyHeight, height);
 
-    if (sarWidth > 1 || sarHeight > 1) {
-        // We treat 0:0 (unspecified) as 1:1.
+    if ((sarWidth > 0 && sarHeight > 0) && (sarWidth != 1 || sarHeight != 1)) {
+        // We treat *:0 and 0:* (unspecified) as 1:1.
 
         meta->setInt32(kKeySARWidth, sarWidth);
         meta->setInt32(kKeySARHeight, sarHeight);
@@ -457,7 +457,10 @@
     const uint8_t *nalStart;
     size_t nalSize;
     while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
-        CHECK_GT(nalSize, 0u);
+        if (nalSize == 0u) {
+            ALOGW("skipping empty nal unit from potentially malformed bitstream");
+            continue;
+        }
 
         unsigned nalType = nalStart[0] & 0x1f;
 
diff --git a/media/libstagefright/codec2/Android.mk b/media/libstagefright/codec2/Android.mk
index a463205..ef06ed7 100644
--- a/media/libstagefright/codec2/Android.mk
+++ b/media/libstagefright/codec2/Android.mk
@@ -11,7 +11,8 @@
 LOCAL_MODULE:= libstagefright_codec2
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index 6490f8f..29c0f33 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -19,7 +19,8 @@
 LOCAL_CFLAGS :=
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 2f34e83..527687b 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -105,7 +105,8 @@
   LOCAL_CFLAGS :=
 
   LOCAL_CFLAGS += -Werror
-  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+  LOCAL_SANITIZE_DIAG := cfi
 
   LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
@@ -130,7 +131,8 @@
   LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
   LOCAL_CFLAGS += -Werror
-  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+  LOCAL_SANITIZE_DIAG := cfi
 
   LOCAL_STATIC_LIBRARIES := \
           libstagefright_aacenc
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index 3395fc1..262f6c8 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -119,7 +119,8 @@
 	frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrwbenc
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
index ecf0884..9959554 100644
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ b/media/libstagefright/codecs/avc/common/Android.mk
@@ -17,6 +17,7 @@
  	$(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index cfca608..e451f30 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -71,7 +71,8 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/avcdec/Android.mk b/media/libstagefright/codecs/avcdec/Android.mk
index 9da8a6f..8c894fd 100644
--- a/media/libstagefright/codecs/avcdec/Android.mk
+++ b/media/libstagefright/codecs/avcdec/Android.mk
@@ -20,7 +20,9 @@
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
 
diff --git a/media/libstagefright/codecs/avcenc/Android.mk b/media/libstagefright/codecs/avcenc/Android.mk
index 1b1a1a0..23ba208 100644
--- a/media/libstagefright/codecs/avcenc/Android.mk
+++ b/media/libstagefright/codecs/avcenc/Android.mk
@@ -20,7 +20,9 @@
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
 
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index a3c37fb..8031bf6 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -10,7 +10,8 @@
         external/flac/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
         libmedia libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index 11978a1..b9a1f8c 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -15,6 +15,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index eed1348..401b56c 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -10,7 +10,8 @@
         external/libgsm/inc
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
         libmedia libstagefright_omx libutils liblog
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
index 83e377c..11c1b89 100644
--- a/media/libstagefright/codecs/hevcdec/Android.mk
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -13,7 +13,8 @@
 LOCAL_C_INCLUDES += $(TOP)/external/libhevc/common
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index e83d24d..c20dc4d 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -76,6 +76,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 7b706fe..f950052 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -72,7 +72,8 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 62dce35..5d153d1 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -76,7 +76,8 @@
         $(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
         libmedia libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.mk b/media/libstagefright/codecs/mpeg2dec/Android.mk
index 65a081e..0afc180 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.mk
+++ b/media/libstagefright/codecs/mpeg2dec/Android.mk
@@ -21,7 +21,8 @@
 LOCAL_SHARED_LIBRARIES  += liblog
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index de1e937..8bc7dbc 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -21,6 +21,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 5c85bbd..747aae0 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -13,7 +13,9 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax \
 
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_STATIC_LIBRARIES := \
         libvpx
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index f28e17b..10147ec 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -95,7 +95,9 @@
                         $(LOCAL_PATH)/./omxdl/arm_neon/vc/m4p10/api
 endif
 
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
 	libmedia libstagefright_omx libstagefright_foundation libutils liblog \
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
index 7b0ad2c..5b887b3 100644
--- a/media/libstagefright/codecs/opus/dec/Android.mk
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -13,7 +13,9 @@
         libopus libmedia libstagefright_omx \
         libstagefright_foundation libutils liblog
 
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_MODULE := libstagefright_soft_opusdec
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
index caed2cc..5fe260b 100644
--- a/media/libstagefright/codecs/raw/Android.mk
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -9,7 +9,8 @@
         frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/filters/Android.mk b/media/libstagefright/filters/Android.mk
index f8e8352..830d2aa 100644
--- a/media/libstagefright/filters/Android.mk
+++ b/media/libstagefright/filters/Android.mk
@@ -22,7 +22,9 @@
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 
-LOCAL_SHARED_LIBRARIES := libmedia
+LOCAL_SHARED_LIBRARIES := \
+        libmedia \
+        libhidlmemory \
 
 LOCAL_MODULE:= libstagefright_mediafilter
 
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
index c1aaa17..db061c1 100644
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -22,6 +22,7 @@
 #include <media/stagefright/MediaErrors.h>
 
 #include <gui/BufferItem.h>
+#include <utils/String8.h>
 
 #include "GraphicBufferListener.h"
 
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 045e044..1b0db33 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -22,8 +22,8 @@
 
 #include "AMessage.h"
 
-#include <android/log.h>
 #include <binder/Parcel.h>
+#include <log/log.h>
 
 #include "AAtomizer.h"
 #include "ABuffer.h"
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
index a7bd6a2..9c16f40 100644
--- a/media/libstagefright/http/Android.mk
+++ b/media/libstagefright/http/Android.mk
@@ -22,7 +22,8 @@
 LOCAL_CFLAGS += -Wno-multichar
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index 6a71b7c..1903d10 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -14,7 +14,8 @@
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_SHARED_LIBRARIES := \
         liblog \
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 477280a..e144942 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -49,11 +49,6 @@
 const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
 const int64_t LiveSession::kResumeThresholdUs = 100000ll;
 
-// Buffer Prepare/Ready/Underflow Marks
-const int64_t LiveSession::kReadyMarkUs = 5000000ll;
-const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
-const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
-
 struct LiveSession::BandwidthEstimator : public RefBase {
     BandwidthEstimator();
 
@@ -495,6 +490,13 @@
     return new HTTPDownloader(mHTTPService, mExtraHeaders);
 }
 
+void LiveSession::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    msg->post();
+}
+
 void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, this);
@@ -620,6 +622,12 @@
 
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
+        case kWhatSetBufferingSettings:
+        {
+            readFromAMessage(msg, &mBufferingSettings);
+            break;
+        }
+
         case kWhatConnect:
         {
             onConnect(msg);
@@ -830,7 +838,10 @@
                     // If switching up, require a cushion bigger than kUnderflowMark
                     // to avoid buffering immediately after the switch.
                     // (If we don't have that cushion we'd rather cancel and try again.)
-                    int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+                    int64_t delayUs =
+                        switchUp ?
+                            (mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll + 1000000ll)
+                            : 0;
                     bool needResumeUntil = false;
                     sp<AMessage> stopParams = msg;
                     if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
@@ -2189,13 +2200,16 @@
         }
 
         ++activeCount;
-        int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
-        if (bufferedDurationUs > readyMark
+        int64_t readyMarkUs =
+            (mInPreparationPhase ?
+                mBufferingSettings.mInitialWatermarkMs :
+                mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+        if (bufferedDurationUs > readyMarkUs
                 || mPacketSources[i]->isFinished(0)) {
             ++readyCount;
         }
         if (!mPacketSources[i]->isFinished(0)) {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll) {
                 ++underflowCount;
             }
             if (bufferedDurationUs > mUpSwitchMark) {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index a0138be..abf8cf0 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -18,6 +18,7 @@
 
 #define LIVE_SESSION_H_
 
+#include <media/BufferingSettings.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/mediaplayer.h>
 
@@ -72,6 +73,8 @@
             uint32_t flags,
             const sp<IMediaHTTPService> &httpService);
 
+    void setBufferingSettings(const BufferingSettings &buffering);
+
     int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
     status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
 
@@ -129,6 +132,7 @@
         kWhatChangeConfiguration2       = 'chC2',
         kWhatChangeConfiguration3       = 'chC3',
         kWhatPollBuffering              = 'poll',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     // Bandwidth Switch Mark Defaults
@@ -138,9 +142,7 @@
     static const int64_t kResumeThresholdUs;
 
     // Buffer Prepare/Ready/Underflow Marks
-    static const int64_t kReadyMarkUs;
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
+    BufferingSettings mBufferingSettings;
 
     struct BandwidthEstimator;
     struct BandwidthItem {
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
index d52ce53..ce9bd3c 100644
--- a/media/libstagefright/include/ACodecBufferChannel.h
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -115,6 +115,7 @@
     const sp<AMessage> mOutputBufferDrained;
 
     sp<MemoryDealer> mDealer;
+    sp<IMemory> mDecryptDestination;
 
     // These should only be accessed via std::atomic_* functions.
     //
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
deleted file mode 100644
index 3dc7df8..0000000
--- a/media/libstagefright/include/DRMExtractor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DRM_EXTRACTOR_H_
-
-#define DRM_EXTRACTOR_H_
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-class DecryptHandle;
-
-class DRMExtractor : public MediaExtractor {
-public:
-    DRMExtractor(const sp<DataSource> &source, const char *mime);
-
-    virtual size_t countTracks();
-    virtual sp<IMediaSource> getTrack(size_t index);
-    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-    virtual sp<MetaData> getMetaData();
-    virtual const char * name() { return "DRMExtractor"; }
-
-protected:
-    virtual ~DRMExtractor();
-
-private:
-    sp<DataSource> mDataSource;
-
-    sp<IMediaExtractor> mOriginalExtractor;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-
-    DRMExtractor(const DRMExtractor &);
-    DRMExtractor &operator=(const DRMExtractor &);
-};
-
-bool SniffDRM(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-            sp<AMessage> *);
-
-}  // namespace android
-
-#endif  // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index ca24c2f..c7e60ca 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -18,6 +18,8 @@
 
 #define OMX_NODE_INSTANCE_H_
 
+#include <atomic>
+
 #include <media/IOMX.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
@@ -25,11 +27,14 @@
 #include <utils/SortedVector.h>
 #include "OmxNodeOwner.h"
 
+#include <android/hidl/memory/1.0/IMemory.h>
+
 namespace android {
 class IOMXBufferSource;
 class IOMXObserver;
 struct OMXMaster;
 class OMXBuffer;
+typedef hidl::memory::V1_0::IMemory IHidlMemory;
 
 struct OMXNodeInstance : public BnOMXNode {
     OMXNodeInstance(
@@ -111,7 +116,7 @@
     OMX_HANDLETYPE mHandle;
     sp<IOMXObserver> mObserver;
     sp<CallbackDispatcher> mDispatcher;
-    bool mDying;
+    std::atomic_bool mDying;
     bool mSailed;  // configuration is set (no more meta-mode changes)
     bool mQueriedProhibitedExtensions;
     SortedVector<OMX_INDEXTYPE> mProhibitedExtensions;
@@ -182,7 +187,7 @@
 
     status_t useBuffer_l(
             OMX_U32 portIndex, const sp<IMemory> &params,
-            IOMX::buffer_id *buffer);
+            const sp<IHidlMemory> &hParams, IOMX::buffer_id *buffer);
 
     status_t useGraphicBuffer_l(
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
diff --git a/media/libstagefright/include/SharedMemoryBuffer.h b/media/libstagefright/include/SharedMemoryBuffer.h
index 1d7f7a6..92df68a 100644
--- a/media/libstagefright/include/SharedMemoryBuffer.h
+++ b/media/libstagefright/include/SharedMemoryBuffer.h
@@ -19,6 +19,7 @@
 #define SHARED_MEMORY_BUFFER_H_
 
 #include <media/MediaCodecBuffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
 
 namespace android {
 
@@ -30,7 +31,9 @@
  */
 class SharedMemoryBuffer : public MediaCodecBuffer {
 public:
+    typedef ::android::hidl::memory::V1_0::IMemory TMemory;
     SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem);
+    SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem);
 
     virtual ~SharedMemoryBuffer() = default;
 
@@ -38,6 +41,7 @@
     SharedMemoryBuffer() = delete;
 
     const sp<IMemory> mMemory;
+    const sp<TMemory> mTMemory;
 };
 
 }  // namespace android
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 72f8043..82b8143 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -17,7 +17,8 @@
 LOCAL_C_INCLUDES += \
         $(TOP)/frameworks/av/media/libstagefright \
         $(TOP)/frameworks/native/include/media/hardware \
-        $(TOP)/frameworks/native/include/media/openmax
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/system/libhidl/base/include \
 
 LOCAL_SHARED_LIBRARIES :=               \
         libbinder                       \
@@ -28,14 +29,19 @@
         libgui                          \
         libcutils                       \
         libstagefright_foundation       \
-        libdl
+        libdl                           \
+        libhidlbase                     \
+        libhidlmemory                   \
+        android.hidl.memory@1.0         \
 
 LOCAL_MODULE:= libstagefright_omx
 LOCAL_CFLAGS += -Werror -Wall
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 include $(BUILD_SHARED_LIBRARY)
 
 ################################################################################
 
+include $(call all-makefiles-under,$(LOCAL_PATH)/hal)
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 7907c62..80c125c 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -139,18 +139,20 @@
         if (index < 0) {
             // This could conceivably happen if the observer dies at roughly the
             // same time that a client attempts to free the node explicitly.
-            return OK;
+
+            // NOTE: it's guaranteed that this method is called at most once per
+            //       instance.
+            ALOGV("freeNode: instance already removed from book-keeping.");
+        } else {
+            mLiveNodes.removeItemsAt(index);
+            IInterface::asBinder(instance->observer())->unlinkToDeath(this);
         }
-        mLiveNodes.removeItemsAt(index);
     }
 
-    IInterface::asBinder(instance->observer())->unlinkToDeath(this);
-
-    OMX_ERRORTYPE err = OMX_ErrorNone;
-    if (instance->handle() != NULL) {
-        err = mMaster->destroyComponentInstance(
-                static_cast<OMX_COMPONENTTYPE *>(instance->handle()));
-    }
+    CHECK(instance->handle() != NULL);
+    OMX_ERRORTYPE err = mMaster->destroyComponentInstance(
+            static_cast<OMX_COMPONENTTYPE *>(instance->handle()));
+    ALOGV("freeNode: handle destroyed: %p", instance->handle());
 
     return StatusFromOMXError(err);
 }
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index c20e9fc..39ed759 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -42,6 +42,8 @@
 #include <utils/NativeHandle.h>
 #include <media/OMXBuffer.h>
 
+#include <hidlmemory/mapping.h>
+
 static const OMX_U32 kPortIndexInput = 0;
 static const OMX_U32 kPortIndexOutput = 1;
 
@@ -106,8 +108,10 @@
 
 struct BufferMeta {
     explicit BufferMeta(
-            const sp<IMemory> &mem, OMX_U32 portIndex, bool copy, OMX_U8 *backup)
+            const sp<IMemory> &mem, const sp<IHidlMemory> &hidlMemory,
+            OMX_U32 portIndex, bool copy, OMX_U8 *backup)
         : mMem(mem),
+          mHidlMemory(hidlMemory),
           mCopyFromOmx(portIndex == kPortIndexOutput && copy),
           mCopyToOmx(portIndex == kPortIndexInput && copy),
           mPortIndex(portIndex),
@@ -129,6 +133,12 @@
           mBackup(NULL) {
     }
 
+    OMX_U8 *getPointer() {
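+        // Return the buffer's base pointer regardless of whether it is backed
+        // by an IMemory or a HIDL memory block.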
+        return mMem.get() ? static_cast<OMX_U8*>(mMem->pointer()) :
+                mHidlMemory.get() ? static_cast<OMX_U8*>(
+                static_cast<void*>(mHidlMemory->getPointer())) : nullptr;
+    }
+
     void CopyFromOMX(const OMX_BUFFERHEADERTYPE *header) {
         if (!mCopyFromOmx) {
             return;
@@ -137,7 +147,7 @@
         // check component returns proper range
         sp<ABuffer> codec = getBuffer(header, true /* limit */);
 
-        memcpy((OMX_U8 *)mMem->pointer() + header->nOffset, codec->data(), codec->size());
+        memcpy(getPointer() + header->nOffset, codec->data(), codec->size());
     }
 
     void CopyToOMX(const OMX_BUFFERHEADERTYPE *header) {
@@ -146,7 +156,7 @@
         }
 
         memcpy(header->pBuffer + header->nOffset,
-                (const OMX_U8 *)mMem->pointer() + header->nOffset,
+                getPointer() + header->nOffset,
                 header->nFilledLen);
     }
 
@@ -184,6 +194,7 @@
     sp<GraphicBuffer> mGraphicBuffer;
     sp<NativeHandle> mNativeHandle;
     sp<IMemory> mMem;
+    sp<IHidlMemory> mHidlMemory;
     bool mCopyFromOmx;
     bool mCopyToOmx;
     OMX_U32 mPortIndex;
@@ -398,15 +409,9 @@
 }
 
 status_t OMXNodeInstance::freeNode() {
-
     CLOG_LIFE(freeNode, "handle=%p", mHandle);
     static int32_t kMaxNumIterations = 10;
 
-    // exit if we have already freed the node
-    if (mHandle == NULL) {
-        return mOwner->freeNode(this);
-    }
-
     // Transition the node from its current state all the way down
     // to "Loaded".
     // This ensures that all active buffers are properly freed even
@@ -416,7 +421,13 @@
     // The code below may trigger some more events to be dispatched
     // by the OMX component - we want to ignore them as our client
     // does not expect them.
-    mDying = true;
+    bool expected = false;
+    if (!mDying.compare_exchange_strong(expected, true)) {
+        // exit if we have already freed the node or are doing so right now.
+        // NOTE: this ensures that the block below executes at most once.
+        ALOGV("Already dying");
+        return OK;
+    }
 
     OMX_STATETYPE state;
     CHECK_EQ(OMX_GetState(mHandle, &state), OMX_ErrorNone);
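
The mDying change above replaces a plain flag write with an atomic test-and-set: only the first caller that flips the flag from false to true continues into teardown, which is what makes the block below run at most once. A minimal sketch of that guard, assuming mDying is a std::atomic<bool> (as the compare_exchange_strong call implies):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<bool> dying{false};  // stands in for mDying

    void freeNodeOnce() {
        bool expected = false;
        // Succeeds for exactly one caller; everyone else sees "already dying".
        if (!dying.compare_exchange_strong(expected, true)) {
            std::printf("already dying\n");
            return;
        }
        std::printf("tearing down exactly once\n");
    }

    int main() {
        std::thread t1(freeNodeOnce), t2(freeNodeOnce);
        t1.join();
        t2.join();
        return 0;
    }
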
@@ -1021,29 +1032,49 @@
     Mutex::Autolock autoLock(mLock);
 
     switch (omxBuffer.mBufferType) {
-    case OMXBuffer::kBufferTypePreset:
-        return useBuffer_l(portIndex, NULL, buffer);
+        case OMXBuffer::kBufferTypePreset:
+            return useBuffer_l(portIndex, NULL, NULL, buffer);
 
-    case OMXBuffer::kBufferTypeSharedMem:
-        return useBuffer_l(portIndex, omxBuffer.mMem, buffer);
+        case OMXBuffer::kBufferTypeSharedMem:
+            return useBuffer_l(portIndex, omxBuffer.mMem, NULL, buffer);
 
-    case OMXBuffer::kBufferTypeANWBuffer:
-        return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+        case OMXBuffer::kBufferTypeANWBuffer:
+            return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
 
-    default:
-        break;
+        case OMXBuffer::kBufferTypeHidlMemory: {
+                sp<IHidlMemory> hidlMemory = mapMemory(omxBuffer.mHidlMemory);
+                return useBuffer_l(portIndex, NULL, hidlMemory, buffer);
+            }
+        default:
+            break;
     }
 
     return BAD_VALUE;
 }
 
 status_t OMXNodeInstance::useBuffer_l(
-        OMX_U32 portIndex, const sp<IMemory> &params, IOMX::buffer_id *buffer) {
+        OMX_U32 portIndex, const sp<IMemory> &params,
+        const sp<IHidlMemory> &hParams, IOMX::buffer_id *buffer) {
     BufferMeta *buffer_meta;
     OMX_BUFFERHEADERTYPE *header;
     OMX_ERRORTYPE err = OMX_ErrorNone;
     bool isMetadata = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
 
+    size_t paramsSize = 0;  // stays defined even when neither params nor hParams is given
+    void* paramsPointer;
+    if (params != NULL && hParams != NULL) {
+        return BAD_VALUE;
+    }
+    if (params != NULL) {
+        paramsPointer = params->pointer();
+        paramsSize = params->size();
+    } else if (hParams != NULL) {
+        paramsPointer = hParams->getPointer();
+        paramsSize = hParams->getSize();
+    } else {
+        paramsPointer = nullptr;
+    }
+
     OMX_U32 allottedSize;
     if (isMetadata) {
         if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource) {
@@ -1057,11 +1088,11 @@
         }
     } else {
         // NULL params is allowed only in metadata mode.
-        if (params == NULL) {
+        if (paramsPointer == nullptr) {
             ALOGE("b/25884056");
             return BAD_VALUE;
         }
-        allottedSize = params->size();
+        allottedSize = paramsSize;
     }
 
     bool isOutputGraphicMetadata = (portIndex == kPortIndexOutput) &&
@@ -1077,7 +1108,7 @@
     if (!isOutputGraphicMetadata && (mQuirks & requiresAllocateBufferBit)) {
         // metadata buffers are not connected cross process; only copy if not meta.
         buffer_meta = new BufferMeta(
-                    params, portIndex, !isMetadata /* copy */, NULL /* data */);
+                    params, hParams, portIndex, !isMetadata /* copy */, NULL /* data */);
 
         err = OMX_AllocateBuffer(
                 mHandle, &header, portIndex, buffer_meta, allottedSize);
@@ -1085,7 +1116,7 @@
         if (err != OMX_ErrorNone) {
             CLOG_ERROR(allocateBuffer, err,
                     SIMPLE_BUFFER(portIndex, (size_t)allottedSize,
-                            params != NULL ? params->pointer() : NULL));
+                            paramsPointer));
         }
     } else {
         OMX_U8 *data = NULL;
@@ -1100,12 +1131,12 @@
             memset(data, 0, allottedSize);
 
             buffer_meta = new BufferMeta(
-                    params, portIndex, false /* copy */, data);
+                    params, hParams, portIndex, false /* copy */, data);
         } else {
-            data = static_cast<OMX_U8 *>(params->pointer());
+            data = static_cast<OMX_U8 *>(paramsPointer);
 
             buffer_meta = new BufferMeta(
-                    params, portIndex, false /* copy */, NULL);
+                    params, hParams, portIndex, false /* copy */, NULL);
         }
 
         err = OMX_UseBuffer(
@@ -1139,7 +1170,7 @@
     }
 
     CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
-            *buffer, portIndex, "%u(%zu)@%p", allottedSize, params->size(), params->pointer()));
+            *buffer, portIndex, "%u(%zu)@%p", allottedSize, paramsSize, paramsPointer));
     return OK;
 }
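
The reworked useBuffer_l() accepts either a binder IMemory (params) or a mapped HIDL memory (hParams), never both, and then reads the buffer pointer and size from whichever was supplied. A stand-alone sketch of that selection, with plain structs standing in for the Android memory types:

    #include <cstddef>

    struct SharedMem { void* ptr; size_t size; };  // stands in for sp<IMemory>
    struct HidlMem   { void* ptr; size_t size; };  // stands in for sp<IHidlMemory>

    // Mirrors the BAD_VALUE check and the params/hParams selection above;
    // returns false when both backing memories are supplied at once.
    bool pickParams(const SharedMem* mem, const HidlMem* hidlMem,
                    void** outPtr, size_t* outSize) {
        if (mem != nullptr && hidlMem != nullptr) {
            return false;
        }
        if (mem != nullptr) {
            *outPtr = mem->ptr;
            *outSize = mem->size;
        } else if (hidlMem != nullptr) {
            *outPtr = hidlMem->ptr;
            *outSize = hidlMem->size;
        } else {
            *outPtr = nullptr;  // legal only in metadata mode
            *outSize = 0;
        }
        return true;
    }
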
 
@@ -1281,7 +1312,7 @@
         return BAD_VALUE;
     }
 
-    status_t err = useBuffer_l(portIndex, NULL, buffer);
+    status_t err = useBuffer_l(portIndex, NULL, NULL, buffer);
     if (err != OK) {
         return err;
     }
@@ -1559,7 +1590,8 @@
     switch (omxBuffer.mBufferType) {
     case OMXBuffer::kBufferTypePreset:
         return emptyBuffer_l(
-                buffer, 0, omxBuffer.mRangeLength, flags, timestamp, fenceFd);
+                buffer, omxBuffer.mRangeOffset, omxBuffer.mRangeLength,
+                flags, timestamp, fenceFd);
 
     case OMXBuffer::kBufferTypeANWBuffer:
         return emptyGraphicBuffer_l(
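
The preset-buffer case of emptyBuffer_l() now forwards the caller's mRangeOffset instead of hard-coding 0, so the (offset, length) pair really describes the valid window inside the buffer. A tiny sketch of what that window means, using a hypothetical struct in place of OMX_BUFFERHEADERTYPE:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the relevant OMX buffer header fields.
    struct BufferHeader {
        uint32_t nAllocLen;   // total capacity
        uint32_t nOffset;     // start of the valid window
        uint32_t nFilledLen;  // length of the valid window
    };

    // Honoring the caller's range: both fields come from the OMXBuffer
    // instead of forcing nOffset to 0.
    void setValidRange(BufferHeader* h, uint32_t rangeOffset, uint32_t rangeLength) {
        assert(rangeOffset + rangeLength <= h->nAllocLen);
        h->nOffset = rangeOffset;
        h->nFilledLen = rangeLength;
    }
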
diff --git a/media/libstagefright/omx/hal/1.0/Android.mk b/media/libstagefright/omx/hal/1.0/Android.mk
index 1633486..c14e909 100644
--- a/media/libstagefright/omx/hal/1.0/Android.mk
+++ b/media/libstagefright/omx/hal/1.0/Android.mk
@@ -1,38 +1,3 @@
 LOCAL_PATH := $(call my-dir)
+include $(call all-makefiles-under,$(LOCAL_PATH))
 
-include $(CLEAR_VARS)
-LOCAL_MODULE := android.hardware.media.omx@1.0-impl
-LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_SRC_FILES := \
-    WGraphicBufferSource.cpp \
-    WOmx.cpp \
-    WOmxBufferSource.cpp \
-    WOmxNode.cpp \
-    WOmxObserver.cpp \
-    Omx.cpp \
-    OmxNode.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
-    libmedia \
-    libstagefright_foundation \
-    libstagefright_omx \
-    libui \
-    libhidlbase \
-    libhidltransport \
-    libhwbinder \
-    libutils \
-    libcutils \
-    libbinder \
-    android.hardware.media.omx@1.0 \
-    android.hardware.graphics.common@1.0 \
-    android.hardware.media@1.0 \
-    android.hidl.base@1.0 \
-
-LOCAL_C_INCLUDES += \
-        $(TOP) \
-        $(TOP)/frameworks/av/include/media \
-        $(TOP)/frameworks/av/media/libstagefright/include \
-        $(TOP)/frameworks/native/include/media/hardware \
-        $(TOP)/frameworks/native/include/media/openmax
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/hal/1.0/Conversion.h b/media/libstagefright/omx/hal/1.0/Conversion.h
deleted file mode 100644
index d42e5bf..0000000
--- a/media/libstagefright/omx/hal/1.0/Conversion.h
+++ /dev/null
@@ -1,799 +0,0 @@
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
-
-#include <hidl/MQDescriptor.h>
-
-#include <unistd.h>
-#include <vector>
-#include <list>
-
-#include <frameworks/native/include/binder/Binder.h>
-#include <frameworks/native/include/binder/Status.h>
-
-#include <OMXFenceParcelable.h>
-#include <cutils/native_handle.h>
-
-#include <IOMX.h>
-#include <VideoAPI.h>
-#include <OMXBuffer.h>
-#include <android/IOMXBufferSource.h>
-#include <android/IGraphicBufferSource.h>
-
-#include <android/hardware/media/omx/1.0/types.h>
-#include <android/hardware/media/omx/1.0/IOmx.h>
-#include <android/hardware/media/omx/1.0/IOmxNode.h>
-#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
-#include <android/hardware/media/omx/1.0/IOmxObserver.h>
-#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-using ::android::hardware::hidl_handle;
-using ::android::String8;
-using ::android::OMXFenceParcelable;
-
-using ::android::hardware::media::omx::V1_0::Message;
-using ::android::omx_message;
-
-using ::android::hardware::media::omx::V1_0::ColorAspects;
-
-using ::android::hardware::graphics::common::V1_0::Dataspace;
-
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
-
-using ::android::OMXBuffer;
-
-using ::android::hardware::media::omx::V1_0::IOmx;
-using ::android::IOMX;
-
-using ::android::hardware::media::omx::V1_0::IOmxNode;
-using ::android::IOMXNode;
-
-using ::android::hardware::media::omx::V1_0::IOmxObserver;
-using ::android::IOMXObserver;
-
-using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
-using ::android::IOMXBufferSource;
-
-// native_handle_t helper functions.
-
-/**
- * \brief Take an fd and create a native handle containing only the given fd.
- * The created handle will need to be deleted manually with
- * `native_handle_delete()`.
- *
- * \param[in] fd The source file descriptor (of type `int`).
- * \return The created `native_handle_t*` that contains the given \p fd. If the
- * supplied \p fd is negative, the created native handle will contain no file
- * descriptors.
- *
- * If the native handle cannot be created, the return value will be
- * `nullptr`.
- *
- * This function does not duplicate the file descriptor.
- */
-inline native_handle_t* native_handle_create_from_fd(int fd) {
-    if (fd < 0) {
-        return native_handle_create(0, 0);
-    }
-    native_handle_t* nh = native_handle_create(1, 0);
-    if (nh == nullptr) {
-        return nullptr;
-    }
-    nh->data[0] = fd;
-    return nh;
-}
-
-/**
- * \brief Extract a file descriptor from a native handle.
- *
- * \param[in] nh The source `native_handle_t*`.
- * \param[in] index The index of the file descriptor in \p nh to read from. This
- * input has the default value of `0`.
- * \return The `index`-th file descriptor in \p nh. If \p nh does not have
- * enough file descriptors, the returned value will be `-1`.
- *
- * This function does not duplicate the file descriptor.
- */
-inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
-    return ((nh == nullptr) || (nh->numFds == 0) ||
-            (nh->numFds <= index) || (index < 0)) ?
-            -1 : nh->data[index];
-}
-
-/**
- * Conversion functions
- * ====================
- *
- * There are two main directions of conversion:
- * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
- *   input. The wrapper has type `TargetType`.
- * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
- *   corresponds to the input. The lifetime of the output does not depend on the
- *   lifetime of the input.
- * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
- *   that cannot be copied and/or moved efficiently, or when there are multiple
- *   output arguments.
- * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
- *   `TargetType` that cannot be copied and/or moved efficiently, or when there
- *   are multiple output arguments.
- *
- * `wrapIn()` and `convertTo()` functions will take output arguments before
- * input arguments. Some of these functions might return a value to indicate
- * success or error.
- *
- * In converting or wrapping something as a Treble type that contains a
- * `hidl_handle`, `native_handle_t*` will need to be created and returned as
- * an additional output argument, hence only `wrapIn()` or `convertTo()` would
- * be available. The caller must call `native_handle_delete()` to deallocate the
- * returned native handle when it is no longer needed.
- *
- * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
- * not perform duplication of file descriptors, while `toTargetType()` and
- * `convertTo()` do.
- */
-
-/**
- * \brief Convert `binder::Status` to `Return<void>`.
- *
- * \param[in] l The source `binder::Status`.
- * \return The corresponding `Return<void>`.
- */
-// convert: ::android::binder::Status -> Return<void>
-inline Return<void> toHardwareStatus(
-        ::android::binder::Status const& l) {
-    if (l.exceptionCode() == ::android::binder::Status::EX_SERVICE_SPECIFIC) {
-        return ::android::hardware::Status::fromServiceSpecificError(
-                l.serviceSpecificErrorCode(),
-                l.exceptionMessage());
-    }
-    return ::android::hardware::Status::fromExceptionCode(
-            l.exceptionCode(),
-            l.exceptionMessage());
-}
-
-/**
- * \brief Convert `Return<void>` to `binder::Status`.
- *
- * \param[in] t The source `Return<void>`.
- * \return The corresponding `binder::Status`.
- */
-// convert: Return<void> -> ::android::binder::Status
-inline ::android::binder::Status toBinderStatus(
-        Return<void> const& t) {
-    return ::android::binder::Status::fromExceptionCode(
-            t.isOk() ? OK : UNKNOWN_ERROR,
-            t.description().c_str());
-}
-
-/**
- * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
- * calls.
- *
- * \param[in] t The source `Return<Status>`.
- * \return The corresponding `status_t`.
- *
- * This function first checks if \p t has a transport error. If it does, then the
- * return value is the transport error code. Otherwise, the return value is
- * converted from `Status` contained inside \p t.
- *
- * Note:
- * - This `Status` is omx-specific. It is defined in `types.hal`.
- * - The name of this function is not `convert`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Return<Status> const& t) {
-    return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Return<void>`.
- * \return The corresponding `status_t`.
- */
-// convert: Return<void> -> status_t
-inline status_t toStatusT(Return<void> const& t) {
-    return t.isOk() ? OK : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Status`.
- * \return the corresponding `status_t`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Status const& t) {
-    return static_cast<status_t>(t);
-}
-
-/**
- * \brief Convert `status_t` to `Status`.
- *
- * \param[in] l The source `status_t`.
- * \return The corresponding `Status`.
- */
-// convert: status_t -> Status
-inline Status toStatus(status_t l) {
-    return static_cast<Status>(l);
-}
-
-/**
- * \brief Wrap `native_handle_t*` in `hidl_handle`.
- *
- * \param[in] nh The source `native_handle_t*`.
- * \return The `hidl_handle` that points to \p nh.
- */
-// wrap: native_handle_t* -> hidl_handle
-inline hidl_handle inHidlHandle(native_handle_t const* nh) {
-    return hidl_handle(nh);
-}
-
-/**
- * \brief Wrap an `omx_message` and construct the corresponding `Message`.
- *
- * \param[out] t The wrapper of type `Message`.
- * \param[out] nh The native_handle_t referred to by `t->fence`.
- * \param[in] l The source `omx_message`.
- * \return `true` if the wrapping is successful; `false` otherwise.
- *
- * Upon success, \p nh will be created to hold the file descriptor stored in
- * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
- * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
- *
- * Upon failure, \p nh will not be created and will not need to be deleted. \p t
- * will be invalid.
- */
-// wrap, omx_message -> Message, native_handle_t*
-inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
-    *nh = native_handle_create_from_fd(l.fenceFd);
-    if (!*nh) {
-        return false;
-    }
-    t->fence = inHidlHandle(*nh);
-    switch (l.type) {
-        case omx_message::EVENT:
-            t->type = Message::Type::EVENT;
-            t->data.eventData.data1 = l.u.event_data.data1;
-            t->data.eventData.data2 = l.u.event_data.data2;
-            t->data.eventData.data3 = l.u.event_data.data3;
-            t->data.eventData.data4 = l.u.event_data.data4;
-            break;
-        case omx_message::EMPTY_BUFFER_DONE:
-            t->type = Message::Type::EMPTY_BUFFER_DONE;
-            t->data.bufferData.buffer = l.u.buffer_data.buffer;
-            break;
-        case omx_message::FILL_BUFFER_DONE:
-            t->type = Message::Type::FILL_BUFFER_DONE;
-            t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
-            t->data.extendedBufferData.rangeOffset = l.u.extended_buffer_data.range_offset;
-            t->data.extendedBufferData.rangeLength = l.u.extended_buffer_data.range_length;
-            t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
-            t->data.extendedBufferData.timestampUs = l.u.extended_buffer_data.timestamp;
-            break;
-        case omx_message::FRAME_RENDERED:
-            t->type = Message::Type::FRAME_RENDERED;
-            t->data.renderData.timestampUs = l.u.render_data.timestamp;
-            t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
-            break;
-        default:
-            native_handle_delete(*nh);
-            return false;
-    }
-    return true;
-}
-
-/**
- * \brief Wrap a `Message` inside an `omx_message`.
- *
- * \param[out] l The wrapper of type `omx_message`.
- * \param[in] t The source `Message`.
- * \return `true` if the wrapping is successful; `false` otherwise.
- */
-// wrap: Message -> omx_message
-inline bool wrapAs(omx_message* l, Message const& t) {
-    l->fenceFd = native_handle_read_fd(t.fence);
-    switch (t.type) {
-        case Message::Type::EVENT:
-            l->type = omx_message::EVENT;
-            l->u.event_data.data1 = t.data.eventData.data1;
-            l->u.event_data.data2 = t.data.eventData.data2;
-            l->u.event_data.data3 = t.data.eventData.data3;
-            l->u.event_data.data4 = t.data.eventData.data4;
-            break;
-        case Message::Type::EMPTY_BUFFER_DONE:
-            l->type = omx_message::EMPTY_BUFFER_DONE;
-            l->u.buffer_data.buffer = t.data.bufferData.buffer;
-            break;
-        case Message::Type::FILL_BUFFER_DONE:
-            l->type = omx_message::FILL_BUFFER_DONE;
-            l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
-            l->u.extended_buffer_data.range_offset = t.data.extendedBufferData.rangeOffset;
-            l->u.extended_buffer_data.range_length = t.data.extendedBufferData.rangeLength;
-            l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
-            l->u.extended_buffer_data.timestamp = t.data.extendedBufferData.timestampUs;
-            break;
-        case Message::Type::FRAME_RENDERED:
-            l->type = omx_message::FRAME_RENDERED;
-            l->u.render_data.timestamp = t.data.renderData.timestampUs;
-            l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
-            break;
-        default:
-            return false;
-    }
-    return true;
-}
-
-/**
- * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output will
- * have an extended lifetime.
- *
- * \param[out] l The output `omx_message`.
- * \param[in] t The source `Message`.
- * \return `true` if the conversion is successful; `false` otherwise.
- *
- * This function calls `wrapAs()`, then attempts to clone the file descriptor
- * for the fence if it is not `-1`. If the clone cannot be made, `false` will be
- * returned.
- */
-// convert: Message -> omx_message
-inline bool convertTo(omx_message* l, Message const& t) {
-    if (!wrapAs(l, t)) {
-        return false;
-    }
-    if (l->fenceFd == -1) {
-        return true;
-    }
-    l->fenceFd = dup(l->fenceFd);
-    return l->fenceFd != -1;
-}
-
-/**
- * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
- *
- * \param[out] t The wrapper of type `hidl_handle`.
- * \param[out] nh The native handle created to hold the file descriptor inside
- * \p l.
- * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
- * file descriptor.
- * \return `true` if \p t and \p nh are successfully created to wrap around \p
- * l; `false` otherwise.
- *
- * On success, \p nh needs to be deleted by the caller with
- * `native_handle_delete()` after \p t and \p nh are no longer needed.
- *
- * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
- * value.
- */
-// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
-inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
-        OMXFenceParcelable const& l) {
-    *nh = native_handle_create_from_fd(l.get());
-    if (!*nh) {
-        return false;
-    }
-    *t = *nh;
-    return true;
-}
-
-/**
- * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
- *
- * \param[out] l The wrapper of type `OMXFenceParcelable`.
- * \param[in] t The source `hidl_handle`.
- */
-// wrap: hidl_handle -> OMXFenceParcelable
-inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
-    l->mFenceFd = native_handle_read_fd(t);
-}
-
-/**
- * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If `hidl_handle`
- * contains file descriptors, the first file descriptor will be duplicated and
- * stored in the output `OMXFenceParcelable`.
- *
- * \param[out] l The output `OMXFenceParcelable`.
- * \param[in] t The input `hidl_handle`.
- * \return `false` if \p t contains a valid file descriptor but duplication
- * fails; `true` otherwise.
- */
-// convert: hidl_handle -> OMXFenceParcelable
-inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
-    int fd = native_handle_read_fd(t);
-    if (fd != -1) {
-        fd = dup(fd);
-        if (fd == -1) {
-            return false;
-        }
-    }
-    l->mFenceFd = fd;
-    return true;
-}
-
-/**
- * \brief Convert `::android::ColorAspects` to `ColorAspects`.
- *
- * \param[in] l The source `::android::ColorAspects`.
- * \return The corresponding `ColorAspects`.
- */
-// convert: ::android::ColorAspects -> ColorAspects
-inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
-    return ColorAspects{
-            static_cast<ColorAspects::Range>(l.mRange),
-            static_cast<ColorAspects::Primaries>(l.mPrimaries),
-            static_cast<ColorAspects::Transfer>(l.mTransfer),
-            static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
-}
-
-/**
- * \brief Convert `int32_t` to `ColorAspects`.
- *
- * \param[in] l The source `int32_t`.
- * \return The corresponding `ColorAspects`.
- */
-// convert: int32_t -> ColorAspects
-inline ColorAspects toHardwareColorAspects(int32_t l) {
-    return ColorAspects{
-            static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
-            static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
-            static_cast<ColorAspects::Transfer>(l & 0xFF),
-            static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
-}
-
-/**
- * \brief Convert `ColorAspects` to a compact `int32_t`.
- *
- * \param[in] t The source `ColorAspects`.
- * \return The corresponding compact `int32_t`.
- */
-// convert: ColorAspects -> int32_t
-inline int32_t toCompactColorAspects(ColorAspects const& t) {
-    return static_cast<int32_t>(
-            (static_cast<uint32_t>(t.range) << 24) |
-            (static_cast<uint32_t>(t.primaries) << 16) |
-            (static_cast<uint32_t>(t.transfer)) |
-            (static_cast<uint32_t>(t.matrixCoeffs) << 8));
-}
-
-/**
- * \brief Convert `int32_t` to `Dataspace`.
- *
- * \param[in] l The source `int32_t`.
- * \result The corresponding `Dataspace`.
- */
-// convert: int32_t -> Dataspace
-inline Dataspace toHardwareDataspace(int32_t l) {
-    return static_cast<Dataspace>(l);
-}
-
-/**
- * \brief Convert `Dataspace` to `int32_t`.
- *
- * \param[in] t The source `Dataspace`.
- * \result The corresponding `int32_t`.
- */
-// convert: Dataspace -> int32_t
-inline int32_t toRawDataspace(Dataspace const& t) {
-    return static_cast<int32_t>(t);
-}
-
-/**
- * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
- *
- * \param[in] l The pointer to the beginning of the opaque buffer.
- * \param[in] size The size of the buffer.
- * \return A `hidl_vec<uint8_t>` that points to the buffer.
- */
-// wrap: void*, size_t -> hidl_vec<uint8_t>
-inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
-    hidl_vec<uint8_t> t;
-    t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
-    return t;
-}
-
-/**
- * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
- *
- * \param[in] l The pointer to the beginning of the opaque buffer.
- * \param[in] size The size of the buffer.
- * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
- */
-// convert: void*, size_t -> hidl_vec<uint8_t>
-inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
-    hidl_vec<uint8_t> t;
-    t.resize(size);
-    uint8_t const* src = static_cast<uint8_t const*>(l);
-    std::copy(src, src + size, t.data());
-    return t;
-}
-
-/**
- * \brief Wrap `OMXBuffer` in `CodecBuffer`.
- *
- * \param[out] t The wrapper of type `CodecBuffer`.
- * \param[in] l The source `OMXBuffer`.
- * \return `true` if the wrapping is successful; `false` otherwise.
- *
- * TODO: Use HIDL's shared memory.
- */
-// wrap: OMXBuffer -> CodecBuffer
-inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
-    switch (l.mBufferType) {
-        case OMXBuffer::kBufferTypeInvalid: {
-            t->type = CodecBuffer::Type::INVALID;
-            return true;
-        }
-        case OMXBuffer::kBufferTypePreset: {
-            t->type = CodecBuffer::Type::PRESET;
-            t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
-            return true;
-        }
-        case OMXBuffer::kBufferTypeSharedMem: {
-            t->type = CodecBuffer::Type::SHARED_MEM;
-/* TODO: Use HIDL's shared memory.
-            ssize_t offset;
-            size_t size;
-            native_handle_t* handle;
-            sp<IMemoryHeap> memoryHeap = l.mMem->getMemory(&offset, &size);
-            t->attr.sharedMem.size = static_cast<uint32_t>(size);
-            t->attr.sharedMem.flags = static_cast<uint32_t>(memoryHeap->getFlags());
-            t->attr.sharedMem.offset = static_cast<uint32_t>(offset);
-            if (!convertFd2Handle(memoryHeap->getHeapID(), nh)) {
-                return false;
-            }
-            t->nativeHandle = hidl_handle(*nh);
-            return true;*/
-            return false;
-        }
-        case OMXBuffer::kBufferTypeANWBuffer: {
-            t->type = CodecBuffer::Type::ANW_BUFFER;
-            t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
-            t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
-            t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
-            t->attr.anwBuffer.format = static_cast<PixelFormat>(l.mGraphicBuffer->getPixelFormat());
-            t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
-            t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
-            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
-            return true;
-        }
-        case OMXBuffer::kBufferTypeNativeHandle: {
-            t->type = CodecBuffer::Type::NATIVE_HANDLE;
-            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
-            return true;
-        }
-    }
-    return false;
-}
-
-/**
- * \brief Convert `CodecBuffer` to `OMXBuffer`.
- *
- * \param[out] l The destination `OMXBuffer`.
- * \param[in] t The source `CodecBuffer`.
- * \return `true` if successful; `false` otherwise.
- *
- * TODO: Use HIDL's shared memory.
- */
-// convert: CodecBuffer -> OMXBuffer
-inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
-    switch (t.type) {
-        case CodecBuffer::Type::INVALID: {
-            *l = OMXBuffer();
-            return true;
-        }
-        case CodecBuffer::Type::PRESET: {
-            *l = OMXBuffer(t.attr.preset.rangeLength);
-            return true;
-        }
-        case CodecBuffer::Type::SHARED_MEM: {
-/* TODO: Use HIDL's memory.
-            *l = OMXBuffer();
-            return true;*/
-            return false;
-        }
-        case CodecBuffer::Type::ANW_BUFFER: {
-            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
-                    t.attr.anwBuffer.width,
-                    t.attr.anwBuffer.height,
-                    static_cast<::android::PixelFormat>(
-                            t.attr.anwBuffer.format),
-                    t.attr.anwBuffer.layerCount,
-                    t.attr.anwBuffer.usage,
-                    t.attr.anwBuffer.stride,
-                    native_handle_clone(t.nativeHandle),
-                    true)));
-            return true;
-        }
-        case CodecBuffer::Type::NATIVE_HANDLE: {
-            *l = OMXBuffer(NativeHandle::create(
-                    native_handle_clone(t.nativeHandle), true));
-            return true;
-        }
-    }
-    return false;
-}
-
-/**
- * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
- *
- * \param[out] t The destination `IOmx::ComponentInfo`.
- * \param[in] l The source `IOMX::ComponentInfo`.
- */
-// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
-inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
-    t->mName = l.mName.string();
-    t->mRoles.resize(l.mRoles.size());
-    size_t i = 0;
-    for (auto& role : l.mRoles) {
-        t->mRoles[i++] = role.string();
-    }
-    return true;
-}
-
-/**
- * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
- *
- * \param[out] l The destination `IOMX::ComponentInfo`.
- * \param[in] t The source `IOmx::ComponentInfo`.
- */
-// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
-inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
-    l->mName = t.mName.c_str();
-    l->mRoles.clear();
-    for (size_t i = 0; i < t.mRoles.size(); ++i) {
-        l->mRoles.push_back(String8(t.mRoles[i].c_str()));
-    }
-    return true;
-}
-
-/**
- * \brief Convert `OMX_BOOL` to `bool`.
- *
- * \param[in] l The source `OMX_BOOL`.
- * \return The destination `bool`.
- */
-// convert: OMX_BOOL -> bool
-inline bool toRawBool(OMX_BOOL l) {
-    return l == OMX_FALSE ? false : true;
-}
-
-/**
- * \brief Convert `bool` to `OMX_BOOL`.
- *
- * \param[in] t The source `bool`.
- * \return The destination `OMX_BOOL`.
- */
-// convert: bool -> OMX_BOOL
-inline OMX_BOOL toEnumBool(bool t) {
-    return t ? OMX_TRUE : OMX_FALSE;
-}
-
-/**
- * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
- *
- * \param[in] l The source `OMX_COMMANDTYPE`.
- * \return The underlying value of type `uint32_t`.
- *
- * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
- */
-// convert: OMX_COMMANDTYPE -> uint32_t
-inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
-    return static_cast<uint32_t>(l);
-}
-
-/**
- * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
- *
- * \param[in] t The source `uint32_t`.
- * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
- *
- * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
- */
-// convert: uint32_t -> OMX_COMMANDTYPE
-inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
-    return static_cast<OMX_COMMANDTYPE>(t);
-}
-
-/**
- * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
- *
- * \param[in] l The source `OMX_INDEXTYPE`.
- * \return The underlying value of type `uint32_t`.
- *
- * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
- */
-// convert: OMX_INDEXTYPE -> uint32_t
-inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
-    return static_cast<uint32_t>(l);
-}
-
-/**
- * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
- *
- * \param[in] t The source `uint32_t`.
- * \return The corresponding enum value of type `OMX_INDEXTYPE`.
- *
- * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
- */
-// convert: uint32_t -> OMX_INDEXTYPE
-inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
-    return static_cast<OMX_INDEXTYPE>(t);
-}
-
-/**
- * \brief Convert `IOMX::PortMode` to `PortMode`.
- *
- * \param[in] l The source `IOMX::PortMode`.
- * \return The destination `PortMode`.
- */
-// convert: IOMX::PortMode -> PortMode
-inline PortMode toHardwarePortMode(IOMX::PortMode l) {
-    return static_cast<PortMode>(l);
-}
-
-/**
- * \brief Convert `PortMode` to `IOMX::PortMode`.
- *
- * \param[in] t The source `PortMode`.
- * \return The destination `IOMX::PortMode`.
- */
-// convert: PortMode -> IOMX::PortMode
-inline IOMX::PortMode toIOMXPortMode(PortMode t) {
-    return static_cast<IOMX::PortMode>(t);
-}
-
-/**
- * \brief Convert `OMX_TICKS` to `uint64_t`.
- *
- * \param[in] l The source `OMX_TICKS`.
- * \return The destination `uint64_t`.
- */
-// convert: OMX_TICKS -> uint64_t
-inline uint64_t toRawTicks(OMX_TICKS l) {
-#ifndef OMX_SKIP64BIT
-    return static_cast<uint64_t>(l);
-#else
-    return static_cast<uint64_t>(l.nLowPart) |
-            static_cast<uint64_t>(l.nHighPart << 32);
-#endif
-}
-
-/**
- * \brief Convert 'uint64_t` to `OMX_TICKS`.
- *
- * \param[in] l The source `uint64_t`.
- * \return The destination `OMX_TICKS`.
- */
-// convert: uint64_t -> OMX_TICKS
-inline OMX_TICKS toOMXTicks(uint64_t t) {
-#ifndef OMX_SKIP64BIT
-    return static_cast<OMX_TICKS>(t);
-#else
-    return OMX_TICKS{
-            static_cast<uint32_t>(t & 0xFFFFFFFF),
-            static_cast<uint32_t>(t >> 32)};
-#endif
-}
-
-
-}  // namespace implementation
-}  // namespace V1_0
-}  // namespace omx
-}  // namespace media
-}  // namespace hardware
-}  // namespace android
-
-#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/media/libstagefright/omx/hal/1.0/Omx.cpp b/media/libstagefright/omx/hal/1.0/Omx.cpp
deleted file mode 100644
index cb23191..0000000
--- a/media/libstagefright/omx/hal/1.0/Omx.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "Omx.h"
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-// Methods from ::android::hardware::media::omx::V1_0::IOmx follow.
-Return<void> Omx::listNodes(listNodes_cb _hidl_cb) {
-    // TODO implement
-    return Void();
-}
-
-Return<void> Omx::allocateNode(const hidl_string& name, const sp<IOmxObserver>& observer, allocateNode_cb _hidl_cb) {
-    // TODO implement
-    return Void();
-}
-
-
-// Methods from ::android::hidl::base::V1_0::IBase follow.
-
-IOmx* HIDL_FETCH_IOmx(const char* /* name */) {
-    return new Omx();
-}
-
-}  // namespace implementation
-}  // namespace V1_0
-}  // namespace omx
-}  // namespace media
-}  // namespace hardware
-}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/Omx.h b/media/libstagefright/omx/hal/1.0/Omx.h
deleted file mode 100644
index 5d06444..0000000
--- a/media/libstagefright/omx/hal/1.0/Omx.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
-
-#include <android/hardware/media/omx/1.0/IOmx.h>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <IOMX.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-using ::android::hardware::media::omx::V1_0::IOmx;
-using ::android::hardware::media::omx::V1_0::IOmxNode;
-using ::android::hardware::media::omx::V1_0::IOmxObserver;
-using ::android::hardware::media::omx::V1_0::Status;
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-struct Omx : public IOmx {
-    // Methods from ::android::hardware::media::omx::V1_0::IOmx follow.
-    Return<void> listNodes(listNodes_cb _hidl_cb) override;
-    Return<void> allocateNode(const hidl_string& name, const sp<IOmxObserver>& observer, allocateNode_cb _hidl_cb) override;
-
-    // Methods from ::android::hidl::base::V1_0::IBase follow.
-
-};
-
-extern "C" IOmx* HIDL_FETCH_IOmx(const char* name);
-
-}  // namespace implementation
-}  // namespace V1_0
-}  // namespace omx
-}  // namespace media
-}  // namespace hardware
-}  // namespace android
-
-#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmx.cpp b/media/libstagefright/omx/hal/1.0/WOmx.cpp
deleted file mode 100644
index 25bcfe9..0000000
--- a/media/libstagefright/omx/hal/1.0/WOmx.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#include "WOmx.h"
-#include "WOmxNode.h"
-#include "WOmxObserver.h"
-#include "Conversion.h"
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-// LWOmx
-LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
-}
-
-status_t LWOmx::listNodes(List<IOMX::ComponentInfo>* list) {
-    status_t fnStatus;
-    status_t transStatus = toStatusT(mBase->listNodes(
-            [&fnStatus, list](
-                    Status status,
-                    hidl_vec<IOmx::ComponentInfo> const& nodeList) {
-                fnStatus = toStatusT(status);
-                list->clear();
-                for (size_t i = 0; i < nodeList.size(); ++i) {
-                    auto newInfo = list->insert(
-                            list->end(), IOMX::ComponentInfo());
-                    convertTo(&*newInfo, nodeList[i]);
-                }
-            }));
-    return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmx::allocateNode(
-        char const* name,
-        sp<IOMXObserver> const& observer,
-        sp<IOMXNode>* omxNode) {
-    status_t fnStatus;
-    status_t transStatus = toStatusT(mBase->allocateNode(
-            name, new TWOmxObserver(observer),
-            [&fnStatus, omxNode](Status status, sp<IOmxNode> const& node) {
-                fnStatus = toStatusT(status);
-                *omxNode = new LWOmxNode(node);
-            }));
-    return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmx::createInputSurface(
-        sp<::android::IGraphicBufferProducer>* /* bufferProducer */,
-        sp<::android::IGraphicBufferSource>* /* bufferSource */) {
-    // TODO: Implement.
-    return INVALID_OPERATION;
-}
-
-::android::IBinder* LWOmx::onAsBinder() {
-    return nullptr;
-}
-
-// TWOmx
-TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
-}
-
-Return<void> TWOmx::listNodes(listNodes_cb _hidl_cb) {
-    List<IOMX::ComponentInfo> lList;
-    Status status = toStatus(mBase->listNodes(&lList));
-
-    hidl_vec<IOmx::ComponentInfo> tList;
-    tList.resize(lList.size());
-    size_t i = 0;
-    for (auto const& lInfo : lList) {
-        convertTo(&(tList[i++]), lInfo);
-    }
-    _hidl_cb(status, tList);
-    return Void();
-}
-
-Return<void> TWOmx::allocateNode(
-        const hidl_string& name,
-        const sp<IOmxObserver>& observer,
-        allocateNode_cb _hidl_cb) {
-    sp<IOMXNode> omxNode;
-    Status status = toStatus(mBase->allocateNode(
-            name, new LWOmxObserver(observer), &omxNode));
-    _hidl_cb(status, new TWOmxNode(omxNode));
-    return Void();
-}
-
-// TODO: Add createInputSurface().
-
-}  // namespace implementation
-}  // namespace V1_0
-}  // namespace omx
-}  // namespace media
-}  // namespace hardware
-}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/impl/Android.mk b/media/libstagefright/omx/hal/1.0/impl/Android.mk
new file mode 100644
index 0000000..ccd19d8
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/Android.mk
@@ -0,0 +1,45 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := android.hardware.media.omx@1.0-impl
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_SRC_FILES := \
+    WGraphicBufferSource.cpp \
+    WOmx.cpp \
+    WOmxBufferProducer.cpp \
+    WOmxBufferSource.cpp \
+    WOmxNode.cpp \
+    WOmxObserver.cpp \
+    WOmxProducerListener.cpp \
+    Omx.cpp \
+    OmxNode.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+    libmedia \
+    libstagefright_foundation \
+    libstagefright_omx \
+    libui \
+    libgui \
+    libhidlbase \
+    libhidltransport \
+    libhwbinder \
+    libhidlmemory \
+    libutils \
+    libcutils \
+    libbinder \
+    liblog \
+    android.hardware.media.omx@1.0 \
+    android.hardware.graphics.common@1.0 \
+    android.hardware.media@1.0 \
+    android.hidl.base@1.0 \
+
+LOCAL_C_INCLUDES += \
+        $(TOP) \
+        $(TOP)/frameworks/av/include/media \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/av/media/libstagefright/omx \
+        $(TOP)/frameworks/native/include/media/hardware \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/include \
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/hal/1.0/impl/Conversion.h b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
new file mode 100644
index 0000000..d27a496
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/Conversion.h
@@ -0,0 +1,2156 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <hidlmemory/mapping.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+
+#include <unistd.h>
+#include <vector>
+#include <list>
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <ui/FenceTime.h>
+
+#include <OMXFenceParcelable.h>
+#include <cutils/native_handle.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <IOMX.h>
+#include <VideoAPI.h>
+#include <OMXBuffer.h>
+#include <android/IOMXBufferSource.h>
+#include <android/IGraphicBufferSource.h>
+
+#include <android/hardware/media/omx/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::String8;
+using ::android::OMXFenceParcelable;
+
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::omx_message;
+
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::V1_0::Rect;
+using ::android::hardware::media::V1_0::Region;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::OMXBuffer;
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::GraphicBuffer;
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::IOMX;
+
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::IOMXNode;
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::IOMXObserver;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::IOMXBufferSource;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
+using ::android::IGraphicBufferProducer;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline native_handle_t* native_handle_create_from_fd(int fd) {
+    if (fd < 0) {
+        return native_handle_create(0, 0);
+    }
+    native_handle_t* nh = native_handle_create(1, 0);
+    if (nh == nullptr) {
+        return nullptr;
+    }
+    nh->data[0] = fd;
+    return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
+    return ((nh == nullptr) || (nh->numFds == 0) ||
+            (nh->numFds <= index) || (index < 0)) ?
+            -1 : nh->data[index];
+}
+
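
As a usage note for the two helpers above (assuming an Android build where <cutils/native_handle.h> and these declarations are in scope): neither helper duplicates the fd, and the handle itself must still be released with native_handle_delete().

    // Wrap a fence fd in a native handle, read it back, then release the handle.
    void fenceHandleExample(int fenceFd) {
        native_handle_t* nh = native_handle_create_from_fd(fenceFd);
        if (nh == nullptr) {
            return;  // allocation failed
        }
        int readBack = native_handle_read_fd(nh);  // same fd, or -1 if fenceFd < 0
        (void)readBack;
        native_handle_delete(nh);  // frees the handle only; fenceFd stays open
    }
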
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ *   input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ *   corresponds to the input. The lifetime of the output does not depend on the
+ *   lifetime of the input.
+ * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ *   that cannot be copied and/or moved efficiently, or when there are multiple
+ *   output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ *   `TargetType` that cannot be copied and/or moved efficiently, or when there
+ *   are multiple output arguments.
+ *
+ * `wrapIn()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapIn()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
+
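
To make the wrap-versus-convert distinction above concrete with two functions from this header: inHidlHandle() only points at an existing native_handle_t, so the caller keeps ownership, while convertTo(OMXFenceParcelable*, hidl_handle const&) dup()s the wrapped fd so the result owns its own copy. A short sketch, assuming the declarations in this header are in scope and that OMXFenceParcelable is default-constructible:

    void conversionConventionExample(native_handle_t* nh /* owned by the caller */) {
        // "in"/"wrap": a non-owning view, valid only while nh stays alive.
        hidl_handle view = inHidlHandle(nh);

        // "convert": a standalone result; the fence fd inside is duplicated.
        OMXFenceParcelable fence;
        if (!convertTo(&fence, view)) {
            // dup() failed; fence carries no usable fd.
        }
        // nh must still be released by the caller with native_handle_delete().
    }
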
+/**
+ * \brief Convert `binder::Status` to `Return<void>`.
+ *
+ * \param[in] l The source `binder::Status`.
+ * \return The corresponding `Return<void>`.
+ */
+// convert: ::android::binder::Status -> Return<void>
+inline Return<void> toHardwareStatus(
+        ::android::binder::Status const& l) {
+    if (l.exceptionCode() == ::android::binder::Status::EX_SERVICE_SPECIFIC) {
+        return ::android::hardware::Status::fromServiceSpecificError(
+                l.serviceSpecificErrorCode(),
+                l.exceptionMessage());
+    }
+    return ::android::hardware::Status::fromExceptionCode(
+            l.exceptionCode(),
+            l.exceptionMessage());
+}
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+        Return<void> const& t) {
+    return ::android::binder::Status::fromExceptionCode(
+            t.isOk() ? OK : UNKNOWN_ERROR,
+            t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first checks if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+    return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+inline status_t toStatusT(Return<void> const& t) {
+    return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+    return static_cast<status_t>(t);
+}
+
+/**
+ * \brief Convert `status_t` to `Status`.
+ *
+ * \param[in] l The source `status_t`.
+ * \return The corresponding `Status`.
+ */
+// convert: status_t -> Status
+inline Status toStatus(status_t l) {
+    return static_cast<Status>(l);
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+inline hidl_handle inHidlHandle(native_handle_t const* nh) {
+    return hidl_handle(nh);
+}
+
+/**
+ * \brief Wrap an `omx_message` and construct the corresponding `Message`.
+ *
+ * \param[out] t The wrapper of type `Message`.
+ * \param[out] nh The native_handle_t referred to by `t->fence`.
+ * \param[in] l The source `omx_message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * Upon success, \p nh will be created to hold the file descriptor stored in
+ * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
+ * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
+ *
+ * Upon failure, \p nh will not be created and will not need to be deleted. \p t
+ * will be invalid.
+ */
+// wrap, omx_message -> Message, native_handle_t*
+inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
+    *nh = native_handle_create_from_fd(l.fenceFd);
+    if (!*nh) {
+        return false;
+    }
+    t->fence = inHidlHandle(*nh);
+    switch (l.type) {
+        case omx_message::EVENT:
+            t->type = Message::Type::EVENT;
+            t->data.eventData.event = uint32_t(l.u.event_data.event);
+            t->data.eventData.data1 = l.u.event_data.data1;
+            t->data.eventData.data2 = l.u.event_data.data2;
+            t->data.eventData.data3 = l.u.event_data.data3;
+            t->data.eventData.data4 = l.u.event_data.data4;
+            break;
+        case omx_message::EMPTY_BUFFER_DONE:
+            t->type = Message::Type::EMPTY_BUFFER_DONE;
+            t->data.bufferData.buffer = l.u.buffer_data.buffer;
+            break;
+        case omx_message::FILL_BUFFER_DONE:
+            t->type = Message::Type::FILL_BUFFER_DONE;
+            t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
+            t->data.extendedBufferData.rangeOffset =
+                    l.u.extended_buffer_data.range_offset;
+            t->data.extendedBufferData.rangeLength =
+                    l.u.extended_buffer_data.range_length;
+            t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
+            t->data.extendedBufferData.timestampUs =
+                    l.u.extended_buffer_data.timestamp;
+            break;
+        case omx_message::FRAME_RENDERED:
+            t->type = Message::Type::FRAME_RENDERED;
+            t->data.renderData.timestampUs = l.u.render_data.timestamp;
+            t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
+            break;
+        default:
+            native_handle_delete(*nh);
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Wrap a `Message` inside an `omx_message`.
+ *
+ * \param[out] l The wrapper of type `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: Message -> omx_message
+inline bool wrapAs(omx_message* l, Message const& t) {
+    l->fenceFd = native_handle_read_fd(t.fence);
+    switch (t.type) {
+        case Message::Type::EVENT:
+            l->type = omx_message::EVENT;
+            l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
+            l->u.event_data.data1 = t.data.eventData.data1;
+            l->u.event_data.data2 = t.data.eventData.data2;
+            l->u.event_data.data3 = t.data.eventData.data3;
+            l->u.event_data.data4 = t.data.eventData.data4;
+            break;
+        case Message::Type::EMPTY_BUFFER_DONE:
+            l->type = omx_message::EMPTY_BUFFER_DONE;
+            l->u.buffer_data.buffer = t.data.bufferData.buffer;
+            break;
+        case Message::Type::FILL_BUFFER_DONE:
+            l->type = omx_message::FILL_BUFFER_DONE;
+            l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
+            l->u.extended_buffer_data.range_offset =
+                    t.data.extendedBufferData.rangeOffset;
+            l->u.extended_buffer_data.range_length =
+                    t.data.extendedBufferData.rangeLength;
+            l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
+            l->u.extended_buffer_data.timestamp =
+                    t.data.extendedBufferData.timestampUs;
+            break;
+        case Message::Type::FRAME_RENDERED:
+            l->type = omx_message::FRAME_RENDERED;
+            l->u.render_data.timestamp = t.data.renderData.timestampUs;
+            l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output will
+ * have an extended lifetime.
+ *
+ * \param[out] l The output `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the conversion is successful; `false` otherwise.
+ *
+ * This function calls `wrapAs()`, then attempts to duplicate the file
+ * descriptor for the fence if it is not `-1`. If duplication fails, `false`
+ * will be returned.
+ */
+// convert: Message -> omx_message
+inline bool convertTo(omx_message* l, Message const& t) {
+    if (!wrapAs(l, t)) {
+        return false;
+    }
+    if (l->fenceFd == -1) {
+        return true;
+    }
+    l->fenceFd = dup(l->fenceFd);
+    return l->fenceFd != -1;
+}
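
The practical difference between wrapAs(omx_message*, Message const&) and convertTo(omx_message*, Message const&) above is fence-fd ownership: the wrapped message borrows the fd, while the converted one holds a dup()'ed copy that the receiver must eventually close. A stripped-down sketch of that rule:

    #include <unistd.h>

    // wrapAs-style: borrow the fd; the original owner keeps closing rights.
    int borrowFenceFd(int fenceFd) {
        return fenceFd;
    }

    // convertTo-style: duplicate the fd; the caller must close the returned fd.
    int duplicateFenceFd(int fenceFd) {
        if (fenceFd == -1) {
            return -1;  // "no fence" stays as-is
        }
        return dup(fenceFd);  // -1 on failure, mirroring convertTo() returning false
    }
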
+
+/**
+ * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle created to hold the file descriptor inside
+ * \p l.
+ * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
+ * file descriptor.
+ * \return `true` if \p t and \p nh are successfully created to wrap around
+ * \p l; `false` otherwise.
+ *
+ * On success, \p nh needs to be deleted by the caller with
+ * `native_handle_delete()` after \p t and \p nh are no longer needed.
+ *
+ * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
+ * value.
+ */
+// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
+        OMXFenceParcelable const& l) {
+    *nh = native_handle_create_from_fd(l.get());
+    if (!*nh) {
+        return false;
+    }
+    *t = *nh;
+    return true;
+}
+
+/**
+ * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
+ *
+ * \param[out] l The wrapper of type `OMXFenceParcelable`.
+ * \param[in] t The source `hidl_handle`.
+ */
+// wrap: hidl_handle -> OMXFenceParcelable
+inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
+    l->mFenceFd = native_handle_read_fd(t);
+}
+
+/**
+ * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If `hidl_handle`
+ * contains file descriptors, the first file descriptor will be duplicated and
+ * stored in the output `OMXFenceParcelable`.
+ *
+ * \param[out] l The output `OMXFenceParcelable`.
+ * \param[in] t The input `hidl_handle`.
+ * \return `false` if \p t contains a valid file descriptor but duplication
+ * fails; `true` otherwise.
+ */
+// convert: hidl_handle -> OMXFenceParcelable
+inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    l->mFenceFd = fd;
+    return true;
+}
+
+/**
+ * \brief Convert `::android::ColorAspects` to `ColorAspects`.
+ *
+ * \param[in] l The source `::android::ColorAspects`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: ::android::ColorAspects -> ColorAspects
+inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>(l.mRange),
+            static_cast<ColorAspects::Primaries>(l.mPrimaries),
+            static_cast<ColorAspects::Transfer>(l.mTransfer),
+            static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
+}
+
+/**
+ * \brief Convert `int32_t` to `ColorAspects`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: int32_t -> ColorAspects
+inline ColorAspects toHardwareColorAspects(int32_t l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
+            static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
+            static_cast<ColorAspects::Transfer>(l & 0xFF),
+            static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
+}
+
+/**
+ * \brief Convert `ColorAspects` to `int32_t` (the compact representation).
+ *
+ * \param[in] t The source `ColorAspects`.
+ * \return The corresponding compact `int32_t`.
+ */
+// convert: ColorAspects -> int32_t
+inline int32_t toCompactColorAspects(ColorAspects const& t) {
+    return static_cast<int32_t>(
+            (static_cast<uint32_t>(t.range) << 24) |
+            (static_cast<uint32_t>(t.primaries) << 16) |
+            (static_cast<uint32_t>(t.transfer)) |
+            (static_cast<uint32_t>(t.matrixCoeffs) << 8));
+}
+
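+// Illustrative note (an editorial sketch, not part of the original header):
+// toHardwareColorAspects(int32_t) and toCompactColorAspects() invert each
+// other. The compact layout packs each field into one byte:
+//
+//     compact = (range << 24) | (primaries << 16) | (matrixCoeffs << 8) |
+//               transfer;
+//
+// so, for any `compact` value whose packed fields each fit in one byte,
+//
+//     toCompactColorAspects(toHardwareColorAspects(compact)) == compact.
+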
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+inline Dataspace toHardwareDataspace(int32_t l) {
+    return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \return The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+inline int32_t toRawDataspace(Dataspace const& t) {
+    return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+    return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.resize(size);
+    uint8_t const* src = static_cast<uint8_t const*>(l);
+    std::copy(src, src + size, t.data());
+    return t;
+}
+
+/**
+ * \brief Wrap `OMXBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] l The source `OMXBuffer`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: OMXBuffer -> CodecBuffer
+inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
+    t->nativeHandle = hidl_handle();
+    t->sharedMemory = hidl_memory();
+    switch (l.mBufferType) {
+        case OMXBuffer::kBufferTypeInvalid: {
+            t->type = CodecBuffer::Type::INVALID;
+            return true;
+        }
+        case OMXBuffer::kBufferTypePreset: {
+            t->type = CodecBuffer::Type::PRESET;
+            t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
+            t->attr.preset.rangeOffset = static_cast<uint32_t>(l.mRangeOffset);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeHidlMemory: {
+            t->type = CodecBuffer::Type::SHARED_MEM;
+            t->sharedMemory = l.mHidlMemory;
+            return true;
+        }
+        case OMXBuffer::kBufferTypeSharedMem: {
+            // This is not supported.
+            return false;
+        }
+        case OMXBuffer::kBufferTypeANWBuffer: {
+            t->type = CodecBuffer::Type::ANW_BUFFER;
+            t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
+            t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
+            t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
+            t->attr.anwBuffer.format = static_cast<PixelFormat>(
+                    l.mGraphicBuffer->getPixelFormat());
+            t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
+            t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
+            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeNativeHandle: {
+            t->type = CodecBuffer::Type::NATIVE_HANDLE;
+            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `CodecBuffer` to `OMXBuffer`.
+ *
+ * \param[out] l The destination `OMXBuffer`.
+ * \param[in] t The source `CodecBuffer`.
+ * \return `true` if successful; `false` otherwise.
+ */
+// convert: CodecBuffer -> OMXBuffer
+inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
+    switch (t.type) {
+        case CodecBuffer::Type::INVALID: {
+            *l = OMXBuffer();
+            return true;
+        }
+        case CodecBuffer::Type::PRESET: {
+            *l = OMXBuffer(
+                    t.attr.preset.rangeOffset,
+                    t.attr.preset.rangeLength);
+            return true;
+        }
+        case CodecBuffer::Type::SHARED_MEM: {
+            *l = OMXBuffer(t.sharedMemory);
+            return true;
+        }
+        case CodecBuffer::Type::ANW_BUFFER: {
+            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
+                    t.attr.anwBuffer.width,
+                    t.attr.anwBuffer.height,
+                    static_cast<::android::PixelFormat>(
+                            t.attr.anwBuffer.format),
+                    t.attr.anwBuffer.layerCount,
+                    t.attr.anwBuffer.usage,
+                    t.attr.anwBuffer.stride,
+                    native_handle_clone(t.nativeHandle),
+                    true)));
+            return true;
+        }
+        case CodecBuffer::Type::NATIVE_HANDLE: {
+            *l = OMXBuffer(NativeHandle::create(
+                    native_handle_clone(t.nativeHandle), true));
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
+ *
+ * \param[out] t The destination `IOmx::ComponentInfo`.
+ * \param[in] l The source `IOMX::ComponentInfo`.
+ */
+// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
+inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
+    t->mName = l.mName.string();
+    t->mRoles.resize(l.mRoles.size());
+    size_t i = 0;
+    for (auto& role : l.mRoles) {
+        t->mRoles[i++] = role.string();
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
+ *
+ * \param[out] l The destination `IOMX::ComponentInfo`.
+ * \param[in] t The source `IOmx::ComponentInfo`.
+ */
+// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
+inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
+    l->mName = t.mName.c_str();
+    l->mRoles.clear();
+    for (size_t i = 0; i < t.mRoles.size(); ++i) {
+        l->mRoles.push_back(String8(t.mRoles[i].c_str()));
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `OMX_BOOL` to `bool`.
+ *
+ * \param[in] l The source `OMX_BOOL`.
+ * \return The destination `bool`.
+ */
+// convert: OMX_BOOL -> bool
+inline bool toRawBool(OMX_BOOL l) {
+    return l == OMX_FALSE ? false : true;
+}
+
+/**
+ * \brief Convert `bool` to `OMX_BOOL`.
+ *
+ * \param[in] t The source `bool`.
+ * \return The destination `OMX_BOOL`.
+ */
+// convert: bool -> OMX_BOOL
+inline OMX_BOOL toEnumBool(bool t) {
+    return t ? OMX_TRUE : OMX_FALSE;
+}
+
+/**
+ * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_COMMANDTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_COMMANDTYPE -> uint32_t
+inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_COMMANDTYPE
+inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
+    return static_cast<OMX_COMMANDTYPE>(t);
+}
+
+/**
+ * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_INDEXTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_INDEXTYPE -> uint32_t
+inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_INDEXTYPE`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_INDEXTYPE
+inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
+    return static_cast<OMX_INDEXTYPE>(t);
+}
+
+/**
+ * \brief Convert `IOMX::PortMode` to `PortMode`.
+ *
+ * \param[in] l The source `IOMX::PortMode`.
+ * \return The destination `PortMode`.
+ */
+// convert: IOMX::PortMode -> PortMode
+inline PortMode toHardwarePortMode(IOMX::PortMode l) {
+    return static_cast<PortMode>(l);
+}
+
+/**
+ * \brief Convert `PortMode` to `IOMX::PortMode`.
+ *
+ * \param[in] t The source `PortMode`.
+ * \return The destination `IOMX::PortMode`.
+ */
+// convert: PortMode -> IOMX::PortMode
+inline IOMX::PortMode toIOMXPortMode(PortMode t) {
+    return static_cast<IOMX::PortMode>(t);
+}
+
+/**
+ * \brief Convert `OMX_TICKS` to `uint64_t`.
+ *
+ * \param[in] l The source `OMX_TICKS`.
+ * \return The destination `uint64_t`.
+ */
+// convert: OMX_TICKS -> uint64_t
+inline uint64_t toRawTicks(OMX_TICKS l) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<uint64_t>(l);
+#else
+    return static_cast<uint64_t>(l.nLowPart) |
+            (static_cast<uint64_t>(l.nHighPart) << 32);
+#endif
+}
+
+/**
+ * \brief Convert `uint64_t` to `OMX_TICKS`.
+ *
+ * \param[in] t The source `uint64_t`.
+ * \return The destination `OMX_TICKS`.
+ */
+// convert: uint64_t -> OMX_TICKS
+inline OMX_TICKS toOMXTicks(uint64_t t) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<OMX_TICKS>(t);
+#else
+    return OMX_TICKS{
+            static_cast<uint32_t>(t & 0xFFFFFFFF),
+            static_cast<uint32_t>(t >> 32)};
+#endif
+}
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+    t->attr.width = l.getWidth();
+    t->attr.height = l.getHeight();
+    t->attr.stride = l.getStride();
+    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+    t->attr.layerCount = l.getLayerCount();
+    t->attr.usage = l.getUsage();
+    t->attr.id = l.getId();
+    t->attr.generationNumber = l.getGenerationNumber();
+    t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+    native_handle_t* handle = t.nativeHandle == nullptr ?
+            nullptr : native_handle_clone(t.nativeHandle);
+
+    size_t const numInts = 12 + (handle ? handle->numInts : 0);
+    int32_t* ints = new int32_t[numInts];
+
+    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+    int* fds = new int[numFds];
+
+    ints[0] = 'GBFR';
+    ints[1] = static_cast<int32_t>(t.attr.width);
+    ints[2] = static_cast<int32_t>(t.attr.height);
+    ints[3] = static_cast<int32_t>(t.attr.stride);
+    ints[4] = static_cast<int32_t>(t.attr.format);
+    ints[5] = static_cast<int32_t>(t.attr.layerCount);
+    ints[6] = static_cast<int32_t>(t.attr.usage);
+    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+    ints[10] = 0;
+    ints[11] = 0;
+    if (handle) {
+        ints[10] = static_cast<int32_t>(handle->numFds);
+        ints[11] = static_cast<int32_t>(handle->numInts);
+        int* intsStart = handle->data + handle->numFds;
+        std::copy(handle->data, intsStart, fds);
+        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+    }
+
+    void const* constBuffer = static_cast<void const*>(ints);
+    size_t size = numInts * sizeof(int32_t);
+    int const* constFds = static_cast<int const*>(fds);
+    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+    delete [] fds;
+    delete [] ints;
+    native_handle_delete(handle);
+    return status == NO_ERROR;
+}
+
+/**
+ * Conversion functions for types outside media
+ * ============================================
+ *
+ * Some objects in libui and libgui that were made to go through binder calls do
+ * not expose ways to read or write their fields to the public. To pass an
+ * object of this kind through the HIDL boundary, translation functions need to
+ * work around the access restriction by using the publicly available
+ * `flatten()` and `unflatten()` functions.
+ *
+ * All `flatten()` and `unflatten()` overloads follow the same convention as
+ * follows:
+ *
+ *     status_t flatten(ObjectType const& object,
+ *                      [OtherType const& other, ...]
+ *                      void*& buffer, size_t& size,
+ *                      int*& fds, size_t& numFds)
+ *
+ *     status_t unflatten(ObjectType* object,
+ *                        [OtherType* other, ...,]
+ *                        void*& buffer, size_t& size,
+ *                        int*& fds, size_t& numFds)
+ *
+ * The number of `other` parameters varies depending on the `ObjectType`. For
+ * example, in the process of unflattening an object that contains
+ * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
+ * be created.
+ *
+ * The last four parameters always work the same way in all overloads of
+ * `flatten()` and `unflatten()`:
+ * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
+ *   `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
+ *   `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
+ *   size (in ints) of the fd buffer pointed to by `fds`.
+ * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
+ *   from, `size` is the size (in bytes) of the non-fd buffer pointed to by
+ *   `buffer`, `fds` is the pointer to the fd buffer to be read from, and
+ *   `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
+ * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
+ *   will be advanced, while `size` and `numFds` will be decreased to reflect
+ *   how much storage/data of the two buffers (fd and non-fd) have been used.
+ * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
+ *   `numFds` are invalid.
+ *
+ * The return value of a successful `flatten()` or `unflatten()` call will be
+ * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
+ *
+ * For each object type that supports flattening, there will be two accompanying
+ * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
+ * return the size of the non-fd buffer that the object will need for
+ * flattening. `getFdCount()` will return the size of the fd buffer that the
+ * object will need for flattening.
+ *
+ * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
+ * `flatten()` and `unflatten()`, are similar to functions of the same name in
+ * the abstract class `Flattenable`. The only difference is that functions in
+ * this file are not member functions of the object type. For example, we write
+ *
+ *     flatten(x, buffer, size, fds, numFds)
+ *
+ * instead of
+ *
+ *     x.flatten(buffer, size, fds, numFds)
+ *
+ * because we cannot modify the type of `x`.
+ *
+ * There is one exception to the naming convention: `hidl_handle` that
+ * represents a fence. The four functions for this "Fence" type have the word
+ * "Fence" attched to their names because the object type, which is
+ * `hidl_handle`, does not carry the special meaning that the object itself can
+ * only contain zero or one file descriptor.
+ */
+
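+// Illustrative usage sketch (editorial addition, not part of the original
+// header): the wrapAs()/convertTo() helpers below all follow this convention
+// by flattening the source object into temporary buffers and then
+// unflattening those buffers into the destination type. `src` and `dst` are
+// placeholder names; overloads of `unflatten()` that create native handles
+// take an extra `nh` out-parameter, omitted here.
+//
+//     size_t const baseSize = src.getFlattenedSize();
+//     size_t const baseNumFds = src.getFdCount();
+//     std::unique_ptr<uint8_t[]> baseBuffer(
+//             new (std::nothrow) uint8_t[baseSize]);
+//     std::unique_ptr<int[]> baseFds(new (std::nothrow) int[baseNumFds]);
+//
+//     void* buffer = baseBuffer.get();
+//     size_t size = baseSize;
+//     int* fds = baseFds.get();
+//     size_t numFds = baseNumFds;
+//     status_t err = src.flatten(buffer, size, fds, numFds);
+//
+//     void const* constBuffer = baseBuffer.get();
+//     size = baseSize;
+//     int const* constFds = baseFds.get();
+//     numFds = baseNumFds;
+//     if (err == NO_ERROR) {
+//         err = unflatten(&dst, constBuffer, size, constFds, numFds);
+//     }
+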
+// Ref: frameworks/native/libs/ui/Fence.cpp
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return The required size of the flat buffer.
+ *
+ * The current version of this function always returns 4, which is the number of
+ * bytes required to store the number of file descriptors contained in the fd
+ * part of the flat buffer.
+ */
+inline size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
+    return 4;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return `0` if \p fence does not contain a valid file descriptor, or `1`
+ * otherwise.
+ */
+inline size_t getFenceFdCount(hidl_handle const& fence) {
+    return native_handle_read_fd(fence) == -1 ? 0 : 1;
+}
+
+/**
+ * \brief Unflatten `Fence` to `hidl_handle`.
+ *
+ * \param[out] fence The destination `hidl_handle`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will point to a newly created
+ * native handle, which needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < 4) {
+        return NO_MEMORY;
+    }
+
+    uint32_t numFdsInHandle;
+    FlattenableUtils::read(buffer, size, numFdsInHandle);
+
+    if (numFdsInHandle > 1) {
+        return BAD_VALUE;
+    }
+
+    if (numFds < numFdsInHandle) {
+        return NO_MEMORY;
+    }
+
+    if (numFdsInHandle) {
+        *nh = native_handle_create_from_fd(*fds);
+        if (*nh == nullptr) {
+            return NO_MEMORY;
+        }
+        *fence = hidl_handle(*nh);
+        ++fds;
+        --numFds;
+    } else {
+        *nh = nullptr;
+        *fence = hidl_handle();
+    }
+
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `hidl_handle` as `Fence`.
+ *
+ * \param[in] fence The source `hidl_handle`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t flattenFence(hidl_handle const& fence,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFenceFlattenedSize(fence) ||
+            numFds < getFenceFdCount(fence)) {
+        return NO_MEMORY;
+    }
+    // Cast to uint32_t since the size of a size_t can vary between 32- and
+    // 64-bit processes
+    FlattenableUtils::write(buffer, size,
+            static_cast<uint32_t>(getFenceFdCount(fence)));
+    int fd = native_handle_read_fd(fence);
+    if (fd != -1) {
+        *fds = fd;
+        ++fds;
+        --numFds;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `Fence` in `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle pointed to by \p t.
+ * \param[in] l The source `Fence`.
+ *
+ * On success, \p nh will hold a newly created native handle, which must be
+ * deleted manually with `native_handle_delete()` afterwards.
+ */
+// wrap: Fence -> hidl_handle
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
+            != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `hidl_handle` to `Fence`.
+ *
+ * \param[out] l The destination `Fence`. `l` must not have been used
+ * (`l->isValid()` must return `false`) before this function is called.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * If \p t contains a valid file descriptor, it will be duplicated.
+ */
+// convert: hidl_handle -> Fence
+inline bool convertTo(Fence* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    native_handle_t* nh = native_handle_create_from_fd(fd);
+    if (nh == nullptr) {
+        if (fd != -1) {
+            close(fd);
+        }
+        return false;
+    }
+
+    size_t const baseSize = getFenceFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    size_t const baseNumFds = getFenceFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
+        native_handle_delete(nh);
+        return false;
+    }
+    native_handle_delete(nh);
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FenceTimeSnapshot const& t) {
+    constexpr size_t min = sizeof(t.state);
+    switch (t.state) {
+        case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            return min;
+        case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+            return min + getFenceFlattenedSize(t.fence);
+        case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            return min + sizeof(
+                    ::android::FenceTime::Snapshot::signalTime);
+    }
+    return 0;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FenceTimeSnapshot const& t) {
+    return t.state ==
+            IOmxBufferProducer::FenceTimeSnapshot::State::FENCE ?
+            getFenceFdCount(t.fence) : 0;
+}
+
+/**
+ * \brief Flatten `FenceTimeSnapshot`.
+ *
+ * \param[in] t The source `FenceTimeSnapshot`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence` if `t.state ==
+ * FENCE`.
+ */
+inline status_t flatten(IOmxBufferProducer::FenceTimeSnapshot const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    switch (t.state) {
+        case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::EMPTY);
+            return NO_ERROR;
+        case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::FENCE);
+            return flattenFence(t.fence, buffer, size, fds, numFds);
+        case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
+            FlattenableUtils::write(buffer, size, t.signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Unflatten `FenceTimeSnapshot`.
+ *
+ * \param[out] t The destination `FenceTimeSnapshot`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and the constructed snapshot contains a
+ * file descriptor, \p nh will be created to hold that file descriptor. In this
+ * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < sizeof(t->state)) {
+        return NO_MEMORY;
+    }
+
+    ::android::FenceTime::Snapshot::State state;
+    FlattenableUtils::read(buffer, size, state);
+    switch (state) {
+        case ::android::FenceTime::Snapshot::State::EMPTY:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY;
+            return NO_ERROR;
+        case ::android::FenceTime::Snapshot::State::FENCE:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::FENCE;
+            return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
+        case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
+            if (size < sizeof(t->signalTimeNs)) {
+                return NO_MEMORY;
+            }
+            FlattenableUtils::read(buffer, size, t->signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
+
+/**
+ * \brief Return a lower bound on the size of the non-fd buffer required to
+ * flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+        IOmxBufferProducer::FrameEventsDelta const& /* t */) {
+    return sizeof(uint64_t) + // mFrameNumber
+            sizeof(uint8_t) + // mIndex
+            sizeof(uint8_t) + // mAddPostCompositeCalled
+            sizeof(uint8_t) + // mAddRetireCalled
+            sizeof(uint8_t) + // mAddReleaseCalled
+            sizeof(nsecs_t) + // mPostedTime
+            sizeof(nsecs_t) + // mRequestedPresentTime
+            sizeof(nsecs_t) + // mLatchTime
+            sizeof(nsecs_t) + // mFirstRefreshStartTime
+            sizeof(nsecs_t) + // mLastRefreshStartTime
+            sizeof(nsecs_t); // mDequeueReadyTime
+}
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FrameEventsDelta const& t) {
+    return minFlattenedSize(t) +
+            getFlattenedSize(t.gpuCompositionDoneFence) +
+            getFlattenedSize(t.displayPresentFence) +
+            getFlattenedSize(t.displayRetireFence) +
+            getFlattenedSize(t.releaseFence);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FrameEventsDelta const& t) {
+    return getFdCount(t.gpuCompositionDoneFence) +
+            getFdCount(t.displayPresentFence) +
+            getFdCount(t.displayRetireFence) +
+            getFdCount(t.releaseFence);
+}
+
+/**
+ * \brief Unflatten `FrameEventsDelta`.
+ *
+ * \param[out] t The destination `FrameEventsDelta`.
+ * \param[out] nh The underlying array of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
+ * populated with `nullptr` or newly created handles. Each non-null slot in
+ * \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(IOmxBufferProducer::FrameEventsDelta* t,
+        std::vector<native_handle_t*>* nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < minFlattenedSize(*t)) {
+        return NO_MEMORY;
+    }
+    FlattenableUtils::read(buffer, size, t->frameNumber);
+
+    // These were written as uint8_t for alignment.
+    uint8_t temp = 0;
+    FlattenableUtils::read(buffer, size, temp);
+    size_t index = static_cast<size_t>(temp);
+    if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    t->index = static_cast<uint32_t>(index);
+
+    FlattenableUtils::read(buffer, size, temp);
+    t->addPostCompositeCalled = static_cast<bool>(temp);
+    FlattenableUtils::read(buffer, size, temp);
+    t->addRetireCalled = static_cast<bool>(temp);
+    FlattenableUtils::read(buffer, size, temp);
+    t->addReleaseCalled = static_cast<bool>(temp);
+
+    FlattenableUtils::read(buffer, size, t->postedTimeNs);
+    FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
+    FlattenableUtils::read(buffer, size, t->latchTimeNs);
+    FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
+    FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
+    FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
+
+    // Fences
+    IOmxBufferProducer::FenceTimeSnapshot* tSnapshot[4];
+    tSnapshot[0] = &t->gpuCompositionDoneFence;
+    tSnapshot[1] = &t->displayPresentFence;
+    tSnapshot[2] = &t->displayRetireFence;
+    tSnapshot[3] = &t->releaseFence;
+    nh->resize(4);
+    for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+        status_t status = unflatten(
+                tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
+                buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            while (snapshotIndex > 0) {
+                --snapshotIndex;
+                if ((*nh)[snapshotIndex] != nullptr) {
+                    native_handle_delete((*nh)[snapshotIndex]);
+                }
+            }
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The source `FrameEventsDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+// Ref: frameworks/native/libs/gui/FrameTimestamp.cpp:
+//      FrameEventsDelta::flatten
+inline status_t flatten(IOmxBufferProducer::FrameEventsDelta const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    // Check that t.index is within a valid range.
+    if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
+            || t.index > std::numeric_limits<uint8_t>::max()) {
+        return BAD_VALUE;
+    }
+
+    FlattenableUtils::write(buffer, size, t.frameNumber);
+
+    // These are static_cast to uint8_t for alignment.
+    FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addRetireCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
+
+    FlattenableUtils::write(buffer, size, t.postedTimeNs);
+    FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
+    FlattenableUtils::write(buffer, size, t.latchTimeNs);
+    FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
+
+    // Fences
+    IOmxBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
+    tSnapshot[0] = &t.gpuCompositionDoneFence;
+    tSnapshot[1] = &t.displayPresentFence;
+    tSnapshot[2] = &t.displayRetireFence;
+    tSnapshot[3] = &t.releaseFence;
+    for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+        status_t status = flatten(
+                *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+    size_t size = 4;
+    for (size_t i = 0; i < t.size(); ++i) {
+        size += getFlattenedSize(t[i]);
+    }
+    return size;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+    size_t numFds = 0;
+    for (size_t i = 0; i < t.size(); ++i) {
+        numFds += getFdCount(t[i]);
+    }
+    return numFds;
+}
+
+/**
+ * \brief Unflatten `FrameEventHistoryDelta`.
+ *
+ * \param[out] t The destination `FrameEventHistoryDelta`.
+ * \param[out] nh The underlying array of arrays of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
+ * newly created handles. The second dimension of \p nh will be 4. Each non-null
+ * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::FrameEventHistoryDelta* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < 4) {
+        return NO_MEMORY;
+    }
+
+    uint32_t deltaCount = 0;
+    FlattenableUtils::read(buffer, size, deltaCount);
+    if (static_cast<size_t>(deltaCount) >
+            ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    t->resize(deltaCount);
+    nh->resize(deltaCount);
+    for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
+        status_t status = unflatten(
+                &((*t)[deltaIndex]), &((*nh)[deltaIndex]),
+                buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventHistoryDelta`.
+ *
+ * \param[in] t The source `FrameEventHistoryDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+inline status_t flatten(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (t.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+    for (size_t deltaIndex = 0; deltaIndex < t.size(); ++deltaIndex) {
+        status_t status = flatten(t[deltaIndex], buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `::android::FrameEventHistoryData` in
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `::android::FrameEventHistoryDelta`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+inline bool wrapAs(IOmxBufferProducer::FrameEventHistoryDelta* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        ::android::FrameEventHistoryDelta const& l) {
+
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `IOmxBufferProducer::FrameEventHistoryDelta` to
+ * `::android::FrameEventHistoryDelta`.
+ *
+ * \param[out] l The destination `::android::FrameEventHistoryDelta`.
+ * \param[in] t The source `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+inline bool convertTo(
+        ::android::FrameEventHistoryDelta* l,
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = getFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/ui/Region.cpp
+
+/**
+ * \brief Return the size of the buffer required to flatten `Region`.
+ *
+ * \param[in] t The input `Region`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(Region const& t) {
+    return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
+}
+
+/**
+ * \brief Unflatten `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t unflatten(Region* t, void const*& buffer, size_t& size) {
+    if (size < sizeof(uint32_t)) {
+        return NO_MEMORY;
+    }
+
+    uint32_t numRects = 0;
+    FlattenableUtils::read(buffer, size, numRects);
+    // Check for multiplication overflow before computing the required size.
+    if (numRects > (UINT32_MAX / sizeof(Rect))) {
+        return NO_MEMORY;
+    }
+    if (size < numRects * sizeof(Rect)) {
+        return NO_MEMORY;
+    }
+
+    t->resize(numRects);
+    for (size_t r = 0; r < numRects; ++r) {
+        ::android::Rect rect(::android::Rect::EMPTY_RECT);
+        status_t status = rect.unflatten(buffer, size);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        FlattenableUtils::advance(buffer, size, sizeof(rect));
+        (*t)[r] = Rect{
+                static_cast<int32_t>(rect.left),
+                static_cast<int32_t>(rect.top),
+                static_cast<int32_t>(rect.right),
+                static_cast<int32_t>(rect.bottom)};
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `Region`.
+ *
+ * \param[in] t The source `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t flatten(Region const& t, void*& buffer, size_t& size) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+    for (size_t r = 0; r < t.size(); ++r) {
+        ::android::Rect rect(
+                static_cast<int32_t>(t[r].left),
+                static_cast<int32_t>(t[r].top),
+                static_cast<int32_t>(t[r].right),
+                static_cast<int32_t>(t[r].bottom));
+        status_t status = rect.flatten(buffer, size);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        FlattenableUtils::advance(buffer, size, sizeof(rect));
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Convert `::android::Region` to `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in] l The source `::android::Region`.
+ */
+// convert: ::android::Region -> Region
+inline bool convertTo(Region* t, ::android::Region const& l) {
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    if (l.flatten(buffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    if (unflatten(t, constBuffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `Region` to `::android::Region`.
+ *
+ * \param[out] l The destination `::android::Region`.
+ * \param[in] t The source `Region`.
+ */
+// convert: Region -> ::android::Region
+inline bool convertTo(::android::Region* l, Region const& t) {
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    if (flatten(t, buffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    if (l->unflatten(constBuffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
+//      IGraphicBufferProducer::QueueBufferInput
+
+/**
+ * \brief Return a lower bound on the size of the buffer required to flatten
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+        IOmxBufferProducer::QueueBufferInput const& /* t */) {
+    return sizeof(int64_t) + // timestamp
+            sizeof(int) + // isAutoTimestamp
+            sizeof(android_dataspace) + // dataSpace
+            sizeof(::android::Rect) + // crop
+            sizeof(int) + // scalingMode
+            sizeof(uint32_t) + // transform
+            sizeof(uint32_t) + // stickyTransform
+            sizeof(bool); // getFrameTimestamps
+}
+
+/**
+ * \brief Return the size of the buffer required to flatten
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(IOmxBufferProducer::QueueBufferInput const& t) {
+    return minFlattenedSize(t) +
+            getFenceFlattenedSize(t.fence) +
+            getFlattenedSize(t.surfaceDamage);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::QueueBufferInput const& t) {
+    return getFenceFdCount(t.fence);
+}
+
+/**
+ * \brief Flatten `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence`.
+ */
+inline status_t flatten(IOmxBufferProducer::QueueBufferInput const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, t.timestamp);
+    FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
+    FlattenableUtils::write(buffer, size,
+            static_cast<android_dataspace_t>(t.dataSpace));
+    FlattenableUtils::write(buffer, size, ::android::Rect(
+            static_cast<int32_t>(t.crop.left),
+            static_cast<int32_t>(t.crop.top),
+            static_cast<int32_t>(t.crop.right),
+            static_cast<int32_t>(t.crop.bottom)));
+    FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
+    FlattenableUtils::write(buffer, size, t.transform);
+    FlattenableUtils::write(buffer, size, t.stickyTransform);
+    FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
+
+    status_t status = flattenFence(t.fence, buffer, size, fds, numFds);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return flatten(t.surfaceDamage, buffer, size);
+}
+
+/**
+ * \brief Unflatten `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The destination `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < minFlattenedSize(*t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::read(buffer, size, t->timestamp);
+    int lIsAutoTimestamp;
+    FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
+    t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
+    android_dataspace_t lDataSpace;
+    FlattenableUtils::read(buffer, size, lDataSpace);
+    t->dataSpace = static_cast<Dataspace>(lDataSpace);
+    Rect lCrop;
+    FlattenableUtils::read(buffer, size, lCrop);
+    t->crop = Rect{
+            static_cast<int32_t>(lCrop.left),
+            static_cast<int32_t>(lCrop.top),
+            static_cast<int32_t>(lCrop.right),
+            static_cast<int32_t>(lCrop.bottom)};
+    int lScalingMode;
+    FlattenableUtils::read(buffer, size, lScalingMode);
+    t->scalingMode = static_cast<int32_t>(lScalingMode);
+    FlattenableUtils::read(buffer, size, t->transform);
+    FlattenableUtils::read(buffer, size, t->stickyTransform);
+    FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
+
+    status_t status = unflattenFence(&(t->fence), nh,
+            buffer, size, fds, numFds);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return unflatten(&(t->surfaceDamage), buffer, size);
+}
+
+/**
+ * \brief Wrap `IGraphicBufferProducer::QueueBufferInput` in
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in] l The source `IGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If the return value is `true` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline bool wrapAs(
+        IOmxBufferProducer::QueueBufferInput* t,
+        native_handle_t** nh,
+        IGraphicBufferProducer::QueueBufferInput const& l) {
+
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `IOmxBufferProducer::QueueBufferInput` to
+ * `IGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] l The destination `IGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * If `t.fence` has a valid file descriptor, it will be duplicated.
+ */
+inline bool convertTo(
+        IGraphicBufferProducer::QueueBufferInput* l,
+        IOmxBufferProducer::QueueBufferInput const& t) {
+
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = getFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
+//      IGraphicBufferProducer::QueueBufferOutput
+
+/**
+ * \brief Wrap `IGraphicBufferProducer::QueueBufferOutput` in
+ * `IOmxBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::QueueBufferOutput`.
+ * \param[out] nh The array of arrays of native handles that are referred to
+ * by members of \p t.
+ * \param[in] l The source `IGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+// wrap: IGraphicBufferProducer::QueueBufferOutput ->
+// IOmxBufferProducer::QueueBufferOutput
+inline bool wrapAs(IOmxBufferProducer::QueueBufferOutput* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        IGraphicBufferProducer::QueueBufferOutput const& l) {
+    if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
+        return false;
+    }
+    t->width = l.width;
+    t->height = l.height;
+    t->transformHint = l.transformHint;
+    t->numPendingBuffers = l.numPendingBuffers;
+    t->nextFrameNumber = l.nextFrameNumber;
+    return true;
+}
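+
+// Illustrative cleanup sketch (not part of the original change): the nested
+// handle vector filled by this overload is owned by the caller, and every
+// non-null entry is released after the wrapped output has been sent, as the
+// TWOmxBufferProducer methods in this patch do. `lOutput` stands for an
+// existing IGraphicBufferProducer::QueueBufferOutput.
+//
+//     IOmxBufferProducer::QueueBufferOutput tOutput;
+//     std::vector<std::vector<native_handle_t*> > nhAA;
+//     if (wrapAs(&tOutput, &nhAA, lOutput)) {
+//         // ... send tOutput over HIDL ...
+//         for (auto& nhA : nhAA) {
+//             for (auto& handle : nhA) {
+//                 if (handle != nullptr) {
+//                     native_handle_delete(handle);
+//                 }
+//             }
+//         }
+//     }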
+
+/**
+ * \brief Convert `IOmxBufferProducer::QueueBufferOutput` to
+ * `IGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] l The destination `IGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferOutput`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+// convert: IOmxBufferProducer::QueueBufferOutput ->
+// IGraphicBufferProducer::QueueBufferOutput
+inline bool convertTo(
+        IGraphicBufferProducer::QueueBufferOutput* l,
+        IOmxBufferProducer::QueueBufferOutput const& t) {
+    if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
+        return false;
+    }
+    l->width = t.width;
+    l->height = t.height;
+    l->transformHint = t.transformHint;
+    l->numPendingBuffers = t.numPendingBuffers;
+    l->nextFrameNumber = t.nextFrameNumber;
+    return true;
+}
+
+/**
+ * \brief Convert `IGraphicBufferProducer::DisconnectMode` to
+ * `IOmxBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `IGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `IOmxBufferProducer::DisconnectMode`.
+ */
+inline IOmxBufferProducer::DisconnectMode toOmxDisconnectMode(
+        IGraphicBufferProducer::DisconnectMode l) {
+    switch (l) {
+        case IGraphicBufferProducer::DisconnectMode::Api:
+            return IOmxBufferProducer::DisconnectMode::API;
+        case IGraphicBufferProducer::DisconnectMode::AllLocal:
+            return IOmxBufferProducer::DisconnectMode::ALL_LOCAL;
+    }
+    return IOmxBufferProducer::DisconnectMode::API;
+}
+
+/**
+ * \brief Convert `IOmxBufferProducer::DisconnectMode` to
+ * `IGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] t The source `IOmxBufferProducer::DisconnectMode`.
+ * \return The corresponding `IGraphicBufferProducer::DisconnectMode`.
+ */
+inline IGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+        IOmxBufferProducer::DisconnectMode t) {
+    switch (t) {
+        case IOmxBufferProducer::DisconnectMode::API:
+            return IGraphicBufferProducer::DisconnectMode::Api;
+        case IOmxBufferProducer::DisconnectMode::ALL_LOCAL:
+            return IGraphicBufferProducer::DisconnectMode::AllLocal;
+    }
+    return IGraphicBufferProducer::DisconnectMode::Api;
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/media/libstagefright/omx/hal/1.0/impl/Omx.cpp b/media/libstagefright/omx/hal/1.0/impl/Omx.cpp
new file mode 100644
index 0000000..eddcc77
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/Omx.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <list>
+
+#include "Omx.h"
+#include <IOMX.h>
+#include <OMXMaster.h>
+#include <OMXNodeInstance.h>
+#include <GraphicBufferSource.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <OMX_AsString.h>
+#include <OMXUtils.h>
+
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "WOmxBufferProducer.h"
+#include "WGraphicBufferSource.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+constexpr size_t kMaxNodeInstances = (1 << 16);
+
+Omx::Omx() : mMaster(new OMXMaster()) {
+}
+
+Omx::~Omx() {
+    delete mMaster;
+}
+
+Return<void> Omx::listNodes(listNodes_cb _hidl_cb) {
+    std::list<::android::IOMX::ComponentInfo> list;
+    char componentName[256];
+    for (OMX_U32 index = 0;
+            mMaster->enumerateComponents(
+            componentName, sizeof(componentName), index) == OMX_ErrorNone;
+            ++index) {
+        list.push_back(::android::IOMX::ComponentInfo());
+        ::android::IOMX::ComponentInfo& info = list.back();
+        info.mName = componentName;
+        ::android::Vector<::android::String8> roles;
+        OMX_ERRORTYPE err =
+                mMaster->getRolesOfComponent(componentName, &roles);
+        if (err == OMX_ErrorNone) {
+            for (OMX_U32 i = 0; i < roles.size(); ++i) {
+                info.mRoles.push_back(roles[i]);
+            }
+        }
+    }
+
+    hidl_vec<ComponentInfo> tList;
+    tList.resize(list.size());
+    size_t i = 0;
+    for (auto const& info : list) {
+        convertTo(&(tList[i++]), info);
+    }
+    _hidl_cb(toStatus(OK), tList);
+    return Void();
+}
+
+Return<void> Omx::allocateNode(
+        const hidl_string& name,
+        const sp<IOmxObserver>& observer,
+        allocateNode_cb _hidl_cb) {
+
+    using ::android::IOMXNode;
+    using ::android::IOMXObserver;
+
+    Mutex::Autolock autoLock(mLock);
+    if (mLiveNodes.size() == kMaxNodeInstances) {
+        _hidl_cb(toStatus(NO_MEMORY), nullptr);
+        return Void();
+    }
+
+    sp<OMXNodeInstance> instance = new OMXNodeInstance(
+            this, new LWOmxObserver(observer), name);
+
+    OMX_COMPONENTTYPE *handle;
+    OMX_ERRORTYPE err = mMaster->makeComponentInstance(
+            name, &OMXNodeInstance::kCallbacks,
+            instance.get(), &handle);
+
+    if (err != OMX_ErrorNone) {
+        ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)",
+                name.c_str(), asString(err), err);
+
+        _hidl_cb(toStatus(StatusFromOMXError(err)), nullptr);
+        return Void();
+    }
+    instance->setHandle(handle);
+
+    mLiveNodes.add(observer.get(), instance);
+    observer->linkToDeath(this, 0);
+    mNode2Observer.add(instance.get(), observer.get());
+
+    _hidl_cb(toStatus(OK), new TWOmxNode(instance));
+    return Void();
+}
+
+Return<void> Omx::createInputSurface(createInputSurface_cb _hidl_cb) {
+    sp<::android::IGraphicBufferProducer> bufferProducer;
+    sp<::android::IGraphicBufferSource> bufferSource;
+
+    sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
+    status_t err = graphicBufferSource->initCheck();
+    if (err != OK) {
+        ALOGE("Failed to create persistent input surface: %s (%d)",
+                strerror(-err), err);
+        _hidl_cb(toStatus(err), nullptr, nullptr);
+        return Void();
+    }
+    bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
+    bufferSource = graphicBufferSource;
+
+    _hidl_cb(toStatus(OK),
+            new TWOmxBufferProducer(bufferProducer),
+            new TWGraphicBufferSource(bufferSource));
+    return Void();
+}
+
+void Omx::serviceDied(uint64_t /* cookie */, wp<IBase> const& who) {
+    sp<OMXNodeInstance> instance;
+    {
+        Mutex::Autolock autoLock(mLock);
+
+        ssize_t index = mLiveNodes.indexOfKey(who);
+
+        if (index < 0) {
+            ALOGE("b/27597103, nonexistent observer on serviceDied");
+            android_errorWriteLog(0x534e4554, "27597103");
+            return;
+        }
+
+        instance = mLiveNodes.editValueAt(index);
+        mLiveNodes.removeItemsAt(index);
+        mNode2Observer.removeItem(instance.get());
+    }
+    instance->onObserverDied();
+}
+
+status_t Omx::freeNode(sp<OMXNodeInstance> const& instance) {
+    if (instance == NULL) {
+        return OK;
+    }
+
+    wp<IBase> observer;
+    {
+        Mutex::Autolock autoLock(mLock);
+        ssize_t observerIndex = mNode2Observer.indexOfKey(instance.get());
+        if (observerIndex < 0) {
+            return OK;
+        }
+        observer = mNode2Observer.valueAt(observerIndex);
+        ssize_t nodeIndex = mLiveNodes.indexOfKey(observer);
+        if (nodeIndex < 0) {
+            return OK;
+        }
+        mNode2Observer.removeItemsAt(observerIndex);
+        mLiveNodes.removeItemsAt(nodeIndex);
+    }
+
+    {
+        sp<IBase> sObserver = observer.promote();
+        if (sObserver != nullptr) {
+            sObserver->unlinkToDeath(this);
+        }
+    }
+
+    OMX_ERRORTYPE err = OMX_ErrorNone;
+    if (instance->handle() != NULL) {
+        err = mMaster->destroyComponentInstance(
+                static_cast<OMX_COMPONENTTYPE*>(instance->handle()));
+    }
+    return StatusFromOMXError(err);
+}
+
+// Methods from ::android::hidl::base::V1_0::IBase follow.
+
+IOmx* HIDL_FETCH_IOmx(const char* /* name */) {
+    return new Omx();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/impl/Omx.h b/media/libstagefright/omx/hal/1.0/impl/Omx.h
new file mode 100644
index 0000000..7633820
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/Omx.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+#include <OmxNodeOwner.h>
+
+namespace android {
+
+struct OMXMaster;
+
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_death_recipient;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+using ::android::wp;
+
+using ::android::OMXMaster;
+using ::android::OmxNodeOwner;
+using ::android::OMXNodeInstance;
+
+struct Omx : public IOmx, public hidl_death_recipient, public OmxNodeOwner {
+    Omx();
+    virtual ~Omx();
+
+    // Methods from IOmx
+    Return<void> listNodes(listNodes_cb _hidl_cb) override;
+    Return<void> allocateNode(
+            const hidl_string& name,
+            const sp<IOmxObserver>& observer,
+            allocateNode_cb _hidl_cb) override;
+    Return<void> createInputSurface(createInputSurface_cb _hidl_cb) override;
+
+    // Method from hidl_death_recipient
+    void serviceDied(uint64_t cookie, const wp<IBase>& who) override;
+
+    // Method from OmxNodeOwner
+    virtual status_t freeNode(sp<OMXNodeInstance> const& instance) override;
+
+protected:
+    OMXMaster* mMaster;
+    Mutex mLock;
+    KeyedVector<wp<IBase>, sp<OMXNodeInstance> > mLiveNodes;
+    KeyedVector<OMXNodeInstance*, wp<IBase> > mNode2Observer;
+};
+
+extern "C" IOmx* HIDL_FETCH_IOmx(const char* name);
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
diff --git a/media/libstagefright/omx/hal/1.0/OmxNode.cpp b/media/libstagefright/omx/hal/1.0/impl/OmxNode.cpp
similarity index 85%
rename from media/libstagefright/omx/hal/1.0/OmxNode.cpp
rename to media/libstagefright/omx/hal/1.0/impl/OmxNode.cpp
index 08d1cd7..f62269f 100644
--- a/media/libstagefright/omx/hal/1.0/OmxNode.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/OmxNode.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include <IOMX.h>
 #include <OMXNodeInstance.h>
 #include "OmxNode.h"
diff --git a/media/libstagefright/omx/hal/1.0/OmxNode.h b/media/libstagefright/omx/hal/1.0/impl/OmxNode.h
similarity index 85%
rename from media/libstagefright/omx/hal/1.0/OmxNode.h
rename to media/libstagefright/omx/hal/1.0/impl/OmxNode.h
index e05e107..fc19306 100644
--- a/media/libstagefright/omx/hal/1.0/OmxNode.h
+++ b/media/libstagefright/omx/hal/1.0/impl/OmxNode.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXNODE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXNODE_H
 
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
similarity index 86%
rename from media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
rename to media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
index 0ec31f2..9de8e3e 100644
--- a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WGraphicBufferSource.h"
 #include "Conversion.h"
 #include "WOmxNode.h"
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
similarity index 85%
rename from media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
rename to media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
index 66977ad..0b9f2ed 100644
--- a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
 
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp
new file mode 100644
index 0000000..0fa8c4c
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmx.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmx.h"
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "WOmxBufferProducer.h"
+#include "WGraphicBufferSource.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmx
+LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
+}
+
+status_t LWOmx::listNodes(List<IOMX::ComponentInfo>* list) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->listNodes(
+            [&fnStatus, list](
+                    Status status,
+                    hidl_vec<IOmx::ComponentInfo> const& nodeList) {
+                fnStatus = toStatusT(status);
+                list->clear();
+                for (size_t i = 0; i < nodeList.size(); ++i) {
+                    auto newInfo = list->insert(
+                            list->end(), IOMX::ComponentInfo());
+                    convertTo(&*newInfo, nodeList[i]);
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::allocateNode(
+        char const* name,
+        sp<IOMXObserver> const& observer,
+        sp<IOMXNode>* omxNode) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->allocateNode(
+            name, new TWOmxObserver(observer),
+            [&fnStatus, omxNode](Status status, sp<IOmxNode> const& node) {
+                fnStatus = toStatusT(status);
+                *omxNode = new LWOmxNode(node);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::createInputSurface(
+        sp<::android::IGraphicBufferProducer>* bufferProducer,
+        sp<::android::IGraphicBufferSource>* bufferSource) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->createInputSurface(
+            [&fnStatus, bufferProducer, bufferSource] (
+                    Status status,
+                    sp<IOmxBufferProducer> const& tProducer,
+                    sp<IGraphicBufferSource> const& tSource) {
+                fnStatus = toStatusT(status);
+                *bufferProducer = new LWOmxBufferProducer(tProducer);
+                *bufferSource = new LWGraphicBufferSource(tSource);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+::android::IBinder* LWOmx::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmx
+TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
+}
+
+Return<void> TWOmx::listNodes(listNodes_cb _hidl_cb) {
+    List<IOMX::ComponentInfo> lList;
+    Status status = toStatus(mBase->listNodes(&lList));
+
+    hidl_vec<IOmx::ComponentInfo> tList;
+    tList.resize(lList.size());
+    size_t i = 0;
+    for (auto const& lInfo : lList) {
+        convertTo(&(tList[i++]), lInfo);
+    }
+    _hidl_cb(status, tList);
+    return Void();
+}
+
+Return<void> TWOmx::allocateNode(
+        const hidl_string& name,
+        const sp<IOmxObserver>& observer,
+        allocateNode_cb _hidl_cb) {
+    sp<IOMXNode> omxNode;
+    Status status = toStatus(mBase->allocateNode(
+            name, new LWOmxObserver(observer), &omxNode));
+    _hidl_cb(status, new TWOmxNode(omxNode));
+    return Void();
+}
+
+Return<void> TWOmx::createInputSurface(createInputSurface_cb _hidl_cb) {
+    sp<::android::IGraphicBufferProducer> lProducer;
+    sp<::android::IGraphicBufferSource> lSource;
+    status_t status = mBase->createInputSurface(&lProducer, &lSource);
+    _hidl_cb(toStatus(status),
+             new TWOmxBufferProducer(lProducer),
+             new TWGraphicBufferSource(lSource));
+    return Void();
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
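+
+// Usage sketch (illustrative only; IOmx::getService() is the HIDL-generated
+// lookup and is assumed here, not introduced by this change). LWOmx lets a
+// client that only speaks the legacy IOMX interface reach the HIDL service,
+// while TWOmx exposes an existing IOMX implementation over HIDL:
+//
+//     sp<IOmx> tOmx = IOmx::getService();       // HIDL proxy
+//     sp<IOMX> lOmx = new LWOmx(tOmx);          // legacy-facing wrapper
+//     List<IOMX::ComponentInfo> components;
+//     lOmx->listNodes(&components);
+//
+//     sp<IOMX> legacyOmx = ...;                 // existing implementation
+//     sp<IOmx> hidlOmx = new TWOmx(legacyOmx);  // HIDL-facing wrapper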
diff --git a/media/libstagefright/omx/hal/1.0/WOmx.h b/media/libstagefright/omx/hal/1.0/impl/WOmx.h
similarity index 75%
rename from media/libstagefright/omx/hal/1.0/WOmx.h
rename to media/libstagefright/omx/hal/1.0/impl/WOmx.h
index b07c4f2..5618d27 100644
--- a/media/libstagefright/omx/hal/1.0/WOmx.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmx.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
 
@@ -62,6 +78,7 @@
             const hidl_string& name,
             const sp<IOmxObserver>& observer,
             allocateNode_cb _hidl_cb) override;
+    Return<void> createInputSurface(createInputSurface_cb _hidl_cb) override;
 
 };
 
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
new file mode 100644
index 0000000..a459c9f
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmxBufferProducer.h"
+#include "WOmxProducerListener.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// TWOmxBufferProducer
+TWOmxBufferProducer::TWOmxBufferProducer(
+        sp<IGraphicBufferProducer> const& base):
+    mBase(base) {
+}
+
+Return<void> TWOmxBufferProducer::requestBuffer(
+        int32_t slot, requestBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> buf;
+    status_t status = mBase->requestBuffer(slot, &buf);
+    AnwBuffer anwBuffer;
+    wrapAs(&anwBuffer, *buf);
+    _hidl_cb(toStatus(status), anwBuffer);
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::setMaxDequeuedBufferCount(
+        int32_t maxDequeuedBuffers) {
+    return toStatus(mBase->setMaxDequeuedBufferCount(
+            static_cast<int>(maxDequeuedBuffers)));
+}
+
+Return<Status> TWOmxBufferProducer::setAsyncMode(bool async) {
+    return toStatus(mBase->setAsyncMode(async));
+}
+
+Return<void> TWOmxBufferProducer::dequeueBuffer(
+        uint32_t width, uint32_t height,
+        PixelFormat format, uint32_t usage,
+        bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
+    int slot;
+    sp<Fence> fence;
+    ::android::FrameEventHistoryDelta outTimestamps;
+    status_t status = mBase->dequeueBuffer(
+            &slot, &fence,
+            width, height,
+            static_cast<::android::PixelFormat>(format), usage,
+            getFrameTimestamps ? &outTimestamps : nullptr);
+
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *fence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::dequeueBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+    FrameEventHistoryDelta tOutTimestamps;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
+        native_handle_delete(nh);
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::dequeueBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+
+    _hidl_cb(toStatus(status),
+            static_cast<int32_t>(slot),
+            tFence,
+            tOutTimestamps);
+    native_handle_delete(nh);
+    if (getFrameTimestamps) {
+        for (auto& nhA : nhAA) {
+            for (auto& handle : nhA) {
+                if (handle != nullptr) {
+                    native_handle_delete(handle);
+                }
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::detachBuffer(int32_t slot) {
+    return toStatus(mBase->detachBuffer(slot));
+}
+
+Return<void> TWOmxBufferProducer::detachNextBuffer(
+        detachNextBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> outBuffer;
+    sp<Fence> outFence;
+    status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
+
+    AnwBuffer tBuffer;
+    wrapAs(&tBuffer, *outBuffer);
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *outFence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::detachNextBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+
+    _hidl_cb(toStatus(status), tBuffer, tFence);
+    native_handle_delete(nh);
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::attachBuffer(
+        const AnwBuffer& buffer,
+        attachBuffer_cb _hidl_cb) {
+    int outSlot;
+    sp<GraphicBuffer> lBuffer = new GraphicBuffer();
+    if (!convertTo(lBuffer.get(), buffer)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::attachBuffer(): "
+                "Cannot convert AnwBuffer to GraphicBuffer"));
+    }
+    status_t status = mBase->attachBuffer(&outSlot, lBuffer);
+
+    _hidl_cb(toStatus(status), static_cast<int32_t>(outSlot));
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::queueBuffer(
+        int32_t slot, const QueueBufferInput& input,
+        queueBuffer_cb _hidl_cb) {
+    IGraphicBufferProducer::QueueBufferInput lInput(
+            0, false, HAL_DATASPACE_UNKNOWN,
+            ::android::Rect(0, 0, 1, 1),
+            NATIVE_WINDOW_SCALING_MODE_FREEZE,
+            0, ::android::Fence::NO_FENCE);
+    if (!convertTo(&lInput, input)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::queueBuffer(): "
+                "Cannot convert IOmxBufferProducer::QueueBufferInput "
+                "to IGraphicBufferProducer::QueueBufferInput"));
+    }
+    IGraphicBufferProducer::QueueBufferOutput lOutput;
+    status_t status = mBase->queueBuffer(
+            static_cast<int>(slot), lInput, &lOutput);
+
+    QueueBufferOutput tOutput;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::queueBuffer(): "
+                "Cannot wrap IGraphicBufferProducer::QueueBufferOutput "
+                "in IOmxBufferProducer::QueueBufferOutput"));
+    }
+
+    _hidl_cb(toStatus(status), tOutput);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::cancelBuffer(
+        int32_t slot, const hidl_handle& fence) {
+    sp<Fence> lFence = new Fence();
+    if (!convertTo(lFence.get(), fence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::cancelBuffer(): "
+                "Cannot convert hidl_handle to Fence"));
+    }
+    return toStatus(mBase->cancelBuffer(static_cast<int>(slot), lFence));
+}
+
+Return<void> TWOmxBufferProducer::query(int32_t what, query_cb _hidl_cb) {
+    int lValue;
+    int lReturn = mBase->query(static_cast<int>(what), &lValue);
+    _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::connect(
+        const sp<IOmxProducerListener>& listener,
+        int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
+    sp<IProducerListener> lListener = new LWOmxProducerListener(listener);
+    IGraphicBufferProducer::QueueBufferOutput lOutput;
+    status_t status = mBase->connect(lListener,
+            static_cast<int>(api),
+            producerControlledByApp,
+            &lOutput);
+
+    QueueBufferOutput tOutput;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::connect(): "
+                "Cannot wrap IGraphicBufferProducer::QueueBufferOutput "
+                "in IOmxBufferProducer::QueueBufferOutput"));
+    }
+
+    _hidl_cb(toStatus(status), tOutput);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::disconnect(
+        int32_t api, DisconnectMode mode) {
+    return toStatus(mBase->disconnect(
+            static_cast<int>(api),
+            toGuiDisconnectMode(mode)));
+}
+
+Return<Status> TWOmxBufferProducer::setSidebandStream(const hidl_handle& stream) {
+    return toStatus(mBase->setSidebandStream(NativeHandle::create(
+            native_handle_clone(stream), true)));
+}
+
+Return<void> TWOmxBufferProducer::allocateBuffers(
+        uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
+    mBase->allocateBuffers(
+            width, height,
+            static_cast<::android::PixelFormat>(format),
+            usage);
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::allowAllocation(bool allow) {
+    return toStatus(mBase->allowAllocation(allow));
+}
+
+Return<Status> TWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+    return toStatus(mBase->setGenerationNumber(generationNumber));
+}
+
+Return<void> TWOmxBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
+    _hidl_cb(mBase->getConsumerName().string());
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+    return toStatus(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+Return<Status> TWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
+    return toStatus(mBase->setAutoRefresh(autoRefresh));
+}
+
+Return<Status> TWOmxBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
+    return toStatus(mBase->setDequeueTimeout(timeoutNs));
+}
+
+Return<void> TWOmxBufferProducer::getLastQueuedBuffer(
+        getLastQueuedBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
+    sp<Fence> lOutFence = new Fence();
+    float lOutTransformMatrix[16];
+    status_t status = mBase->getLastQueuedBuffer(
+            &lOutBuffer, &lOutFence, lOutTransformMatrix);
+
+    AnwBuffer tOutBuffer;
+    wrapAs(&tOutBuffer, *lOutBuffer);
+    hidl_handle tOutFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tOutFence, &nh, *lOutFence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::getLastQueuedBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+    hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
+
+    _hidl_cb(toStatus(status), tOutBuffer, tOutFence, tOutTransformMatrix);
+    native_handle_delete(nh);
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::getFrameTimestamps(
+        getFrameTimestamps_cb _hidl_cb) {
+    ::android::FrameEventHistoryDelta lDelta;
+    mBase->getFrameTimestamps(&lDelta);
+
+    FrameEventHistoryDelta tDelta;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tDelta, &nhAA, lDelta)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::getFrameTimestamps(): "
+                "Cannot wrap ::android::FrameEventHistoryDelta "
+                "in FrameEventHistoryDelta"));
+    }
+
+    _hidl_cb(tDelta);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
+    uint64_t outId;
+    status_t status = mBase->getUniqueId(&outId);
+    _hidl_cb(toStatus(status), outId);
+    return Void();
+}
+
+// LWOmxBufferProducer
+
+LWOmxBufferProducer::LWOmxBufferProducer(sp<IOmxBufferProducer> const& base) :
+    mBase(base) {
+}
+
+status_t LWOmxBufferProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
+    *buf = new GraphicBuffer();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->requestBuffer(
+            static_cast<int32_t>(slot),
+            [&fnStatus, &buf] (Status status, AnwBuffer const& buffer) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(buf->get(), buffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::setMaxDequeuedBufferCount(
+        int maxDequeuedBuffers) {
+    return toStatusT(mBase->setMaxDequeuedBufferCount(
+            static_cast<int32_t>(maxDequeuedBuffers)));
+}
+
+status_t LWOmxBufferProducer::setAsyncMode(bool async) {
+    return toStatusT(mBase->setAsyncMode(async));
+}
+
+status_t LWOmxBufferProducer::dequeueBuffer(
+        int* slot, sp<Fence>* fence,
+        uint32_t w, uint32_t h, ::android::PixelFormat format,
+        uint32_t usage, FrameEventHistoryDelta* outTimestamps) {
+    *fence = new Fence();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->dequeueBuffer(
+            w, h, static_cast<PixelFormat>(format), usage,
+            outTimestamps != nullptr,
+            [&fnStatus, slot, fence, outTimestamps] (
+                    Status status,
+                    int32_t tSlot,
+                    hidl_handle const& tFence,
+                    IOmxBufferProducer::FrameEventHistoryDelta const& tTs) {
+                fnStatus = toStatusT(status);
+                *slot = tSlot;
+                if (!convertTo(fence->get(), tFence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                if (outTimestamps && !convertTo(outTimestamps, tTs)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::detachBuffer(int slot) {
+    return toStatusT(mBase->detachBuffer(static_cast<int>(slot)));
+}
+
+status_t LWOmxBufferProducer::detachNextBuffer(
+        sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence) {
+    *outBuffer = new GraphicBuffer();
+    *outFence = new Fence();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->detachNextBuffer(
+            [&fnStatus, outBuffer, outFence] (
+                    Status status,
+                    AnwBuffer const& tBuffer,
+                    hidl_handle const& tFence) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(outFence->get(), tFence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                if (!convertTo(outBuffer->get(), tBuffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::attachBuffer(
+        int* outSlot, const sp<GraphicBuffer>& buffer) {
+    AnwBuffer tBuffer;
+    wrapAs(&tBuffer, *buffer);
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->attachBuffer(tBuffer,
+            [&fnStatus, outSlot] (Status status, int32_t slot) {
+                fnStatus = toStatusT(status);
+                *outSlot = slot;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::queueBuffer(
+        int slot,
+        const QueueBufferInput& input,
+        QueueBufferOutput* output) {
+    IOmxBufferProducer::QueueBufferInput tInput;
+    native_handle_t* nh;
+    if (!wrapAs(&tInput, &nh, input)) {
+        return BAD_VALUE;
+    }
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->queueBuffer(slot, tInput,
+            [&fnStatus, output] (
+                    Status status,
+                    IOmxBufferProducer::QueueBufferOutput const& tOutput) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(output, tOutput)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    native_handle_delete(nh);
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *fence)) {
+        return BAD_VALUE;
+    }
+
+    status_t status = toStatusT(mBase->cancelBuffer(
+            static_cast<int32_t>(slot), tFence));
+    native_handle_delete(nh);
+    return status;
+}
+
+int LWOmxBufferProducer::query(int what, int* value) {
+    int result;
+    status_t transStatus = toStatusT(mBase->query(
+            static_cast<int32_t>(what),
+            [&result, value] (int32_t tResult, int32_t tValue) {
+                result = static_cast<int>(tResult);
+                *value = static_cast<int>(tValue);
+            }));
+    return transStatus == NO_ERROR ? result : static_cast<int>(transStatus);
+}
+
+status_t LWOmxBufferProducer::connect(
+        const sp<IProducerListener>& listener, int api,
+        bool producerControlledByApp, QueueBufferOutput* output) {
+    sp<IOmxProducerListener> tListener = new TWOmxProducerListener(listener);
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->connect(
+            tListener, static_cast<int32_t>(api), producerControlledByApp,
+            [&fnStatus, output] (
+                    Status status,
+                    IOmxBufferProducer::QueueBufferOutput const& tOutput) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(output, tOutput)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::disconnect(int api, DisconnectMode mode) {
+    return toStatusT(mBase->disconnect(
+            static_cast<int32_t>(api), toOmxDisconnectMode(mode)));
+}
+
+status_t LWOmxBufferProducer::setSidebandStream(
+        const sp<NativeHandle>& stream) {
+    return toStatusT(mBase->setSidebandStream(stream->handle()));
+}
+
+void LWOmxBufferProducer::allocateBuffers(uint32_t width, uint32_t height,
+        ::android::PixelFormat format, uint32_t usage) {
+    mBase->allocateBuffers(
+            width, height, static_cast<PixelFormat>(format), usage);
+}
+
+status_t LWOmxBufferProducer::allowAllocation(bool allow) {
+    return toStatusT(mBase->allowAllocation(allow));
+}
+
+status_t LWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+    return toStatusT(mBase->setGenerationNumber(generationNumber));
+}
+
+String8 LWOmxBufferProducer::getConsumerName() const {
+    String8 lName;
+    mBase->getConsumerName([&lName] (hidl_string const& name) {
+                lName = name.c_str();
+            });
+    return lName;
+}
+
+status_t LWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+    return toStatusT(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+status_t LWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
+    return toStatusT(mBase->setAutoRefresh(autoRefresh));
+}
+
+status_t LWOmxBufferProducer::setDequeueTimeout(nsecs_t timeout) {
+    return toStatusT(mBase->setDequeueTimeout(static_cast<int64_t>(timeout)));
+}
+
+status_t LWOmxBufferProducer::getLastQueuedBuffer(
+        sp<GraphicBuffer>* outBuffer,
+        sp<Fence>* outFence,
+        float outTransformMatrix[16]) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getLastQueuedBuffer(
+            [&fnStatus, outBuffer, outFence, &outTransformMatrix] (
+                    Status status,
+                    AnwBuffer const& buffer,
+                    hidl_handle const& fence,
+                    hidl_array<float, 16> const& transformMatrix) {
+                fnStatus = toStatusT(status);
+                *outBuffer = new GraphicBuffer();
+                if (!convertTo(outBuffer->get(), buffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                *outFence = new Fence();
+                if (!convertTo(outFence->get(), fence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                std::copy(transformMatrix.data(),
+                        transformMatrix.data() + 16,
+                        outTransformMatrix);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+void LWOmxBufferProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
+    mBase->getFrameTimestamps([outDelta] (
+            IOmxBufferProducer::FrameEventHistoryDelta const& tDelta) {
+                convertTo(outDelta, tDelta);
+            });
+}
+
+status_t LWOmxBufferProducer::getUniqueId(uint64_t* outId) const {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getUniqueId(
+            [&fnStatus, outId] (Status status, uint64_t id) {
+                fnStatus = toStatusT(status);
+                *outId = id;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+::android::IBinder* LWOmxBufferProducer::onAsBinder() {
+    return nullptr;
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
new file mode 100644
index 0000000..a991f49
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
+
+#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
+#include <binder/Binder.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IProducerListener.h>
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
+using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IGraphicBufferProducer;
+using ::android::IProducerListener;
+
+struct TWOmxBufferProducer : public IOmxBufferProducer {
+    sp<IGraphicBufferProducer> mBase;
+    TWOmxBufferProducer(sp<IGraphicBufferProducer> const& base);
+    Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
+            override;
+    Return<Status> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
+            override;
+    Return<Status> setAsyncMode(bool async) override;
+    Return<void> dequeueBuffer(
+            uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
+            bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
+    Return<Status> detachBuffer(int32_t slot) override;
+    Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
+    Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
+            override;
+    Return<void> queueBuffer(
+            int32_t slot, const IOmxBufferProducer::QueueBufferInput& input,
+            queueBuffer_cb _hidl_cb) override;
+    Return<Status> cancelBuffer(int32_t slot, const hidl_handle& fence)
+            override;
+    Return<void> query(int32_t what, query_cb _hidl_cb) override;
+    Return<void> connect(const sp<IOmxProducerListener>& listener,
+            int32_t api, bool producerControlledByApp,
+            connect_cb _hidl_cb) override;
+    Return<Status> disconnect(
+            int32_t api,
+            IOmxBufferProducer::DisconnectMode mode) override;
+    Return<Status> setSidebandStream(const hidl_handle& stream) override;
+    Return<void> allocateBuffers(
+            uint32_t width, uint32_t height,
+            PixelFormat format, uint32_t usage) override;
+    Return<Status> allowAllocation(bool allow) override;
+    Return<Status> setGenerationNumber(uint32_t generationNumber) override;
+    Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
+    Return<Status> setSharedBufferMode(bool sharedBufferMode) override;
+    Return<Status> setAutoRefresh(bool autoRefresh) override;
+    Return<Status> setDequeueTimeout(int64_t timeoutNs) override;
+    Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
+    Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
+    Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
+};
+
+struct LWOmxBufferProducer : public IGraphicBufferProducer {
+    sp<IOmxBufferProducer> mBase;
+    LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
+
+    status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) override;
+    status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) override;
+    status_t setAsyncMode(bool async) override;
+    status_t dequeueBuffer(int* slot, sp<Fence>* fence, uint32_t w,
+            uint32_t h, ::android::PixelFormat format, uint32_t usage,
+            FrameEventHistoryDelta* outTimestamps) override;
+    status_t detachBuffer(int slot) override;
+    status_t detachNextBuffer(sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence)
+            override;
+    status_t attachBuffer(int* outSlot, const sp<GraphicBuffer>& buffer)
+            override;
+    status_t queueBuffer(int slot,
+            const QueueBufferInput& input,
+            QueueBufferOutput* output) override;
+    status_t cancelBuffer(int slot, const sp<Fence>& fence) override;
+    int query(int what, int* value) override;
+    status_t connect(const sp<IProducerListener>& listener, int api,
+            bool producerControlledByApp, QueueBufferOutput* output) override;
+    status_t disconnect(int api, DisconnectMode mode = DisconnectMode::Api)
+            override;
+    status_t setSidebandStream(const sp<NativeHandle>& stream) override;
+    void allocateBuffers(uint32_t width, uint32_t height,
+            ::android::PixelFormat format, uint32_t usage) override;
+    status_t allowAllocation(bool allow) override;
+    status_t setGenerationNumber(uint32_t generationNumber) override;
+    String8 getConsumerName() const override;
+    status_t setSharedBufferMode(bool sharedBufferMode) override;
+    status_t setAutoRefresh(bool autoRefresh) override;
+    status_t setDequeueTimeout(nsecs_t timeout) override;
+    status_t getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
+          sp<Fence>* outFence, float outTransformMatrix[16]) override;
+    void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
+    status_t getUniqueId(uint64_t* outId) const override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
similarity index 77%
rename from media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
rename to media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
index 79eb1be..2e00894 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WOmxBufferSource.h"
 #include "Conversion.h"
 #include <utils/String8.h>
@@ -82,7 +98,10 @@
 Return<void> TWOmxBufferSource::onInputBufferEmptied(
         uint32_t buffer, hidl_handle const& fence) {
     OMXFenceParcelable fenceParcelable;
-    wrapAs(&fenceParcelable, fence);
+    if (!convertTo(&fenceParcelable, fence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE);
+    }
     return toHardwareStatus(mBase->onInputBufferEmptied(
             static_cast<int32_t>(buffer), fenceParcelable));
 }
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
similarity index 79%
rename from media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
rename to media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
index 3ba9453..a2e940f 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
 
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
similarity index 90%
rename from media/libstagefright/omx/hal/1.0/WOmxNode.cpp
rename to media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
index 0e781d8..6b21138 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include <IOMX.h>
 #include <OMXNodeInstance.h>
 #include "WOmxNode.h"
@@ -50,7 +66,6 @@
 status_t LWOmxNode::setParameter(
         OMX_INDEXTYPE index, const void *params, size_t size) {
     hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
-    tParams = inHidlBytes(params, size);
     return toStatusT(mBase->setParameter(
             toRawIndexType(index), tParams));
 }
@@ -102,7 +117,8 @@
             [&fnStatus, sidebandHandle](
                     Status status, hidl_handle const& outSidebandHandle) {
                 fnStatus = toStatusT(status);
-                *sidebandHandle = native_handle_clone(outSidebandHandle);
+                *sidebandHandle = outSidebandHandle == nullptr ?
+                        nullptr : native_handle_clone(outSidebandHandle);
             }));
     return transStatus == NO_ERROR ? fnStatus : transStatus;
 }
@@ -180,6 +196,7 @@
     }
     status_t status = toStatusT(mBase->fillBuffer(
             buffer, codecBuffer, fenceNh));
+    native_handle_close(fenceNh);
     native_handle_delete(fenceNh);
     return status;
 }
@@ -201,6 +218,7 @@
             flags,
             toRawTicks(timestamp),
             fenceNh));
+    native_handle_close(fenceNh);
     native_handle_delete(fenceNh);
     return status;
 }
@@ -224,13 +242,14 @@
         return NO_MEMORY;
     }
     status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+    native_handle_close(nh);
     native_handle_delete(nh);
     return status;
 }
 
 // TODO: this is temporary, will be removed when quirks move to OMX side.
-status_t LWOmxNode::setQuirks(OMX_U32 /* quirks */) {
-    return NO_ERROR;
+status_t LWOmxNode::setQuirks(OMX_U32 quirks) {
+    return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));
 }
 
 ::android::IBinder* LWOmxNode::onAsBinder() {
@@ -306,7 +325,7 @@
 Return<void> TWOmxNode::configureVideoTunnelMode(
         uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
         configureVideoTunnelMode_cb _hidl_cb) {
-    native_handle_t* sidebandHandle;
+    native_handle_t* sidebandHandle = nullptr;
     Status status = toStatus(mBase->configureVideoTunnelMode(
             portIndex,
             toEnumBool(tunneled),
@@ -376,7 +395,7 @@
     return toStatus(mBase->fillBuffer(
             buffer,
             omxBuffer,
-            native_handle_read_fd(fence)));
+            dup(native_handle_read_fd(fence))));
 }
 
 Return<Status> TWOmxNode::emptyBuffer(
@@ -391,7 +410,7 @@
             omxBuffer,
             flags,
             toOMXTicks(timestampUs),
-            native_handle_read_fd(fence)));
+            dup(native_handle_read_fd(fence))));
 }
 
 Return<void> TWOmxNode::getExtensionIndex(
@@ -406,12 +425,16 @@
 
 Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
     omx_message lMsg;
-    if (!wrapAs(&lMsg, tMsg)) {
+    if (!convertTo(&lMsg, tMsg)) {
         return Status::BAD_VALUE;
     }
     return toStatus(mBase->dispatchMessage(lMsg));
 }
 
+Return<void> TWOmxNode::setQuirks(uint32_t quirks) {
+    mBase->setQuirks(static_cast<OMX_U32>(quirks));
+    return Void();
+}
 
 }  // namespace implementation
 }  // namespace V1_0
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.h b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
similarity index 89%
copy from media/libstagefright/omx/hal/1.0/WOmxNode.h
copy to media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
index 3a459d6..d606f3a 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxNode.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
 
@@ -137,6 +153,7 @@
             hidl_string const& parameterName,
             getExtensionIndex_cb _hidl_cb) override;
     Return<Status> dispatchMessage(Message const& msg) override;
+    Return<void> setQuirks(uint32_t quirks) override;
 };
 
 }  // namespace implementation
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
similarity index 66%
rename from media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
rename to media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
index f3ad8d3..d5549fb 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WOmxObserver.h"
 
 #include <vector>
@@ -29,6 +45,7 @@
     }
     mBase->onMessages(tMessages);
     for (auto& handle : handles) {
+        native_handle_close(handle);
         native_handle_delete(handle);
     }
 }
@@ -45,7 +62,7 @@
     std::list<omx_message> lMessages;
     for (size_t i = 0; i < tMessages.size(); ++i) {
         lMessages.push_back(omx_message{});
-        wrapAs(&lMessages.back(), tMessages[i]);
+        convertTo(&lMessages.back(), tMessages[i]);
     }
     mBase->onMessages(lMessages);
     return Return<void>();
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.h b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
similarity index 73%
rename from media/libstagefright/omx/hal/1.0/WOmxObserver.h
rename to media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
index c8ab296..85593c3 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxObserver.h
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
 
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
new file mode 100644
index 0000000..fa6e9aa
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmxProducerListener.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// TWOmxProducerListener
+TWOmxProducerListener::TWOmxProducerListener(
+        sp<IProducerListener> const& base):
+    mBase(base) {
+}
+
+Return<void> TWOmxProducerListener::onBufferReleased() {
+    mBase->onBufferReleased();
+    return Void();
+}
+
+Return<bool> TWOmxProducerListener::needsReleaseNotify() {
+    return mBase->needsReleaseNotify();
+}
+
+// LWOmxProducerListener
+LWOmxProducerListener::LWOmxProducerListener(
+        sp<IOmxProducerListener> const& base):
+    mBase(base) {
+}
+
+void LWOmxProducerListener::onBufferReleased() {
+    mBase->onBufferReleased();
+}
+
+bool LWOmxProducerListener::needsReleaseNotify() {
+    return static_cast<bool>(mBase->needsReleaseNotify());
+}
+
+::android::IBinder* LWOmxProducerListener::onAsBinder() {
+    return nullptr;
+}
+
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
new file mode 100644
index 0000000..b93a555
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+
+#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/IBinder.h>
+#include <gui/IProducerListener.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IProducerListener;
+
+struct TWOmxProducerListener : public IOmxProducerListener {
+    sp<IProducerListener> mBase;
+    TWOmxProducerListener(sp<IProducerListener> const& base);
+    Return<void> onBufferReleased() override;
+    Return<bool> needsReleaseNotify() override;
+};
+
+class LWOmxProducerListener : public IProducerListener {
+public:
+    sp<IOmxProducerListener> mBase;
+    LWOmxProducerListener(sp<IOmxProducerListener> const& base);
+    void onBufferReleased() override;
+    bool needsReleaseNotify() override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
diff --git a/media/libstagefright/omx/hal/1.0/utils/Android.mk b/media/libstagefright/omx/hal/1.0/utils/Android.mk
new file mode 100644
index 0000000..6930c87
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := android.hardware.media.omx@1.0-utils
+LOCAL_SRC_FILES := \
+    WGraphicBufferSource.cpp \
+    WOmx.cpp \
+    WOmxBufferProducer.cpp \
+    WOmxBufferSource.cpp \
+    WOmxNode.cpp \
+    WOmxObserver.cpp \
+    WOmxProducerListener.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+    libmedia \
+    libstagefright_foundation \
+    libstagefright_omx \
+    libui \
+    libgui \
+    libhidlbase \
+    libhidltransport \
+    libhwbinder \
+    libhidlmemory \
+    libutils \
+    libcutils \
+    libbinder \
+    liblog \
+    android.hardware.media.omx@1.0 \
+    android.hardware.graphics.common@1.0 \
+    android.hardware.media@1.0 \
+    android.hidl.base@1.0 \
+
+LOCAL_C_INCLUDES += \
+        $(TOP) \
+        $(TOP)/frameworks/av/include/media \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/av/media/libstagefright/omx \
+        $(TOP)/frameworks/native/include/media/hardware \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/include \
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/hal/1.0/utils/Conversion.h b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
new file mode 100644
index 0000000..6f4d864
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/Conversion.h
@@ -0,0 +1,2156 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <hidlmemory/mapping.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+
+#include <unistd.h>
+#include <vector>
+#include <list>
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <ui/FenceTime.h>
+
+#include <OMXFenceParcelable.h>
+#include <cutils/native_handle.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <IOMX.h>
+#include <VideoAPI.h>
+#include <OMXBuffer.h>
+#include <android/IOMXBufferSource.h>
+#include <android/IGraphicBufferSource.h>
+
+#include <android/hardware/media/omx/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::String8;
+using ::android::OMXFenceParcelable;
+
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::omx_message;
+
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::V1_0::Rect;
+using ::android::hardware::media::V1_0::Region;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::OMXBuffer;
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::GraphicBuffer;
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::IOMX;
+
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::IOMXNode;
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::IOMXObserver;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::IOMXBufferSource;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
+using ::android::IGraphicBufferProducer;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline native_handle_t* native_handle_create_from_fd(int fd) {
+    if (fd < 0) {
+        return native_handle_create(0, 0);
+    }
+    native_handle_t* nh = native_handle_create(1, 0);
+    if (nh == nullptr) {
+        return nullptr;
+    }
+    nh->data[0] = fd;
+    return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
+    return ((nh == nullptr) || (nh->numFds == 0) ||
+            (nh->numFds <= index) || (index < 0)) ?
+            -1 : nh->data[index];
+}
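+
+// A minimal usage sketch (illustrative only; `fence` stands for any
+// hidl_handle carrying at most one fd). The returned descriptor is borrowed,
+// not duplicated, so callers that need ownership dup() it, as the TWOmxNode
+// fillBuffer/emptyBuffer methods above do:
+//
+//     int fd = native_handle_read_fd(fence);  // borrowed, not duplicated
+//     if (fd != -1) {
+//         fd = dup(fd);                       // take ownership explicitly
+//     }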
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ *   input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ *   corresponds to the input. The lifetime of the output does not depend on the
+ *   lifetime of the input.
+ * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ *   that cannot be copied and/or moved efficiently, or when there are multiple
+ *   output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ *   `TargetType` that cannot be copied and/or moved efficiently, or when there
+ *   are multiple output arguments.
+ *
+ * `wrapIn()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapIn()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
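+
+// A minimal caller-side sketch of the conventions above (illustrative only;
+// `lMsg` is a hypothetical omx_message carrying a fence fd):
+//
+//     Message tMsg;
+//     native_handle_t* nh = nullptr;
+//     if (wrapAs(&tMsg, &nh, lMsg)) {
+//         // ... send tMsg across the HIDL boundary ...
+//         native_handle_delete(nh);  // caller owns and must delete the handle
+//     }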
+
+/**
+ * \brief Convert `binder::Status` to `Return<void>`.
+ *
+ * \param[in] l The source `binder::Status`.
+ * \return The corresponding `Return<void>`.
+ */
+// convert: ::android::binder::Status -> Return<void>
+inline Return<void> toHardwareStatus(
+        ::android::binder::Status const& l) {
+    if (l.exceptionCode() == ::android::binder::Status::EX_SERVICE_SPECIFIC) {
+        return ::android::hardware::Status::fromServiceSpecificError(
+                l.serviceSpecificErrorCode(),
+                l.exceptionMessage());
+    }
+    return ::android::hardware::Status::fromExceptionCode(
+            l.exceptionCode(),
+            l.exceptionMessage());
+}
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+        Return<void> const& t) {
+    return ::android::binder::Status::fromExceptionCode(
+            t.isOk() ? OK : UNKNOWN_ERROR,
+            t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first checks if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+    return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+inline status_t toStatusT(Return<void> const& t) {
+    return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+    return static_cast<status_t>(t);
+}
+
+/**
+ * \brief Convert `status_t` to `Status`.
+ *
+ * \param[in] l The source `status_t`.
+ * \return The corresponding `Status`.
+ */
+// convert: status_t -> Status
+inline Status toStatus(status_t l) {
+    return static_cast<Status>(l);
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+inline hidl_handle inHidlHandle(native_handle_t const* nh) {
+    return hidl_handle(nh);
+}
+
+/**
+ * \brief Wrap an `omx_message` and construct the corresponding `Message`.
+ *
+ * \param[out] t The wrapper of type `Message`.
+ * \param[out] nh The native_handle_t referred to by `t->fence`.
+ * \param[in] l The source `omx_message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * Upon success, \p nh will be created to hold the file descriptor stored in
+ * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
+ * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
+ *
+ * Upon failure, \p nh will not be created and will not need to be deleted. \p t
+ * will be invalid.
+ */
+// wrap, omx_message -> Message, native_handle_t*
+inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
+    *nh = native_handle_create_from_fd(l.fenceFd);
+    if (!*nh) {
+        return false;
+    }
+    t->fence = inHidlHandle(*nh);
+    switch (l.type) {
+        case omx_message::EVENT:
+            t->type = Message::Type::EVENT;
+            t->data.eventData.event = uint32_t(l.u.event_data.event);
+            t->data.eventData.data1 = l.u.event_data.data1;
+            t->data.eventData.data2 = l.u.event_data.data2;
+            t->data.eventData.data3 = l.u.event_data.data3;
+            t->data.eventData.data4 = l.u.event_data.data4;
+            break;
+        case omx_message::EMPTY_BUFFER_DONE:
+            t->type = Message::Type::EMPTY_BUFFER_DONE;
+            t->data.bufferData.buffer = l.u.buffer_data.buffer;
+            break;
+        case omx_message::FILL_BUFFER_DONE:
+            t->type = Message::Type::FILL_BUFFER_DONE;
+            t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
+            t->data.extendedBufferData.rangeOffset =
+                    l.u.extended_buffer_data.range_offset;
+            t->data.extendedBufferData.rangeLength =
+                    l.u.extended_buffer_data.range_length;
+            t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
+            t->data.extendedBufferData.timestampUs =
+                    l.u.extended_buffer_data.timestamp;
+            break;
+        case omx_message::FRAME_RENDERED:
+            t->type = Message::Type::FRAME_RENDERED;
+            t->data.renderData.timestampUs = l.u.render_data.timestamp;
+            t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
+            break;
+        default:
+            native_handle_delete(*nh);
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Wrap a `Message` inside an `omx_message`.
+ *
+ * \param[out] l The wrapper of type `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: Message -> omx_message
+inline bool wrapAs(omx_message* l, Message const& t) {
+    l->fenceFd = native_handle_read_fd(t.fence);
+    switch (t.type) {
+        case Message::Type::EVENT:
+            l->type = omx_message::EVENT;
+            l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
+            l->u.event_data.data1 = t.data.eventData.data1;
+            l->u.event_data.data2 = t.data.eventData.data2;
+            l->u.event_data.data3 = t.data.eventData.data3;
+            l->u.event_data.data4 = t.data.eventData.data4;
+            break;
+        case Message::Type::EMPTY_BUFFER_DONE:
+            l->type = omx_message::EMPTY_BUFFER_DONE;
+            l->u.buffer_data.buffer = t.data.bufferData.buffer;
+            break;
+        case Message::Type::FILL_BUFFER_DONE:
+            l->type = omx_message::FILL_BUFFER_DONE;
+            l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
+            l->u.extended_buffer_data.range_offset =
+                    t.data.extendedBufferData.rangeOffset;
+            l->u.extended_buffer_data.range_length =
+                    t.data.extendedBufferData.rangeLength;
+            l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
+            l->u.extended_buffer_data.timestamp =
+                    t.data.extendedBufferData.timestampUs;
+            break;
+        case Message::Type::FRAME_RENDERED:
+            l->type = omx_message::FRAME_RENDERED;
+            l->u.render_data.timestamp = t.data.renderData.timestampUs;
+            l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output will
+ * have an extended lifetime.
+ *
+ * \param[out] l The output `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the conversion is successful; `false` otherwise.
+ *
+ * This function calls `wrapAs()`, then attempts to duplicate the file
+ * descriptor for the fence if it is not `-1`. If duplication fails, `false`
+ * will be returned.
+ */
+// convert: Message -> omx_message
+inline bool convertTo(omx_message* l, Message const& t) {
+    if (!wrapAs(l, t)) {
+        return false;
+    }
+    if (l->fenceFd == -1) {
+        return true;
+    }
+    l->fenceFd = dup(l->fenceFd);
+    return l->fenceFd != -1;
+}
+
+/**
+ * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle created to hold the file descriptor inside
+ * \p l.
+ * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
+ * file descriptor.
+ * \return `true` if \p t and \p nh are successfully created to wrap around \p
+ * l; `false` otherwise.
+ *
+ * On success, \p nh needs to be deleted by the caller with
+ * `native_handle_delete()` after \p t and \p nh are no longer needed.
+ *
+ * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
+ * value.
+ */
+// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
+        OMXFenceParcelable const& l) {
+    *nh = native_handle_create_from_fd(l.get());
+    if (!*nh) {
+        return false;
+    }
+    *t = *nh;
+    return true;
+}
+
+/**
+ * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
+ *
+ * \param[out] l The wrapper of type `OMXFenceParcelable`.
+ * \param[in] t The source `hidl_handle`.
+ */
+// wrap: hidl_handle -> OMXFenceParcelable
+inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
+    l->mFenceFd = native_handle_read_fd(t);
+}
+
+/**
+ * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If `hidl_handle`
+ * contains file descriptors, the first file descriptor will be duplicated and
+ * stored in the output `OMXFenceParcelable`.
+ *
+ * \param[out] l The output `OMXFenceParcelable`.
+ * \param[in] t The input `hidl_handle`.
+ * \return `false` if \p t contains a valid file descriptor but duplication
+ * fails; `true` otherwise.
+ */
+// convert: hidl_handle -> OMXFenceParcelable
+inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    l->mFenceFd = fd;
+    return true;
+}
+
+/**
+ * \brief Convert `::android::ColorAspects` to `ColorAspects`.
+ *
+ * \param[in] l The source `::android::ColorAspects`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: ::android::ColorAspects -> ColorAspects
+inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>(l.mRange),
+            static_cast<ColorAspects::Primaries>(l.mPrimaries),
+            static_cast<ColorAspects::Transfer>(l.mTransfer),
+            static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
+}
+
+/**
+ * \brief Convert `int32_t` to `ColorAspects`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: int32_t -> ColorAspects
+inline ColorAspects toHardwareColorAspects(int32_t l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
+            static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
+            static_cast<ColorAspects::Transfer>(l & 0xFF),
+            static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
+}
+
+/**
+ * \brief Pack `ColorAspects` into a compact `int32_t`.
+ *
+ * \param[in] t The source `ColorAspects`.
+ * \return The corresponding compact `int32_t`.
+ */
+// convert: ColorAspects -> int32_t
+inline int32_t toCompactColorAspects(ColorAspects const& t) {
+    return static_cast<int32_t>(
+            (static_cast<uint32_t>(t.range) << 24) |
+            (static_cast<uint32_t>(t.primaries) << 16) |
+            (static_cast<uint32_t>(t.transfer)) |
+            (static_cast<uint32_t>(t.matrixCoeffs) << 8));
+}
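+
+// Round-trip sketch for the two helpers above (illustrative only; `aspects`
+// is any `::android::ColorAspects` value):
+//
+//     int32_t compact = toCompactColorAspects(toHardwareColorAspects(aspects));
+//     ColorAspects restored = toHardwareColorAspects(compact);
+//     // `restored` matches the input field-for-field, since both directions
+//     // use the layout (range << 24) | (primaries << 16) |
+//     // (matrixCoeffs << 8) | transfer.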
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+inline Dataspace toHardwareDataspace(int32_t l) {
+    return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \return The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+inline int32_t toRawDataspace(Dataspace const& t) {
+    return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+    return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.resize(size);
+    uint8_t const* src = static_cast<uint8_t const*>(l);
+    std::copy(src, src + size, t.data());
+    return t;
+}
+
+/**
+ * \brief Wrap `OMXBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] l The source `OMXBuffer`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: OMXBuffer -> CodecBuffer
+inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
+    t->nativeHandle = hidl_handle();
+    t->sharedMemory = hidl_memory();
+    switch (l.mBufferType) {
+        case OMXBuffer::kBufferTypeInvalid: {
+            t->type = CodecBuffer::Type::INVALID;
+            return true;
+        }
+        case OMXBuffer::kBufferTypePreset: {
+            t->type = CodecBuffer::Type::PRESET;
+            t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
+            t->attr.preset.rangeOffset = static_cast<uint32_t>(l.mRangeOffset);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeHidlMemory: {
+            t->type = CodecBuffer::Type::SHARED_MEM;
+            t->sharedMemory = l.mHidlMemory;
+            return true;
+        }
+        case OMXBuffer::kBufferTypeSharedMem: {
+            // This is not supported.
+            return false;
+        }
+        case OMXBuffer::kBufferTypeANWBuffer: {
+            t->type = CodecBuffer::Type::ANW_BUFFER;
+            t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
+            t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
+            t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
+            t->attr.anwBuffer.format = static_cast<PixelFormat>(
+                    l.mGraphicBuffer->getPixelFormat());
+            t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
+            t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
+            t->nativeHandle = hidl_handle(l.mGraphicBuffer->handle);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeNativeHandle: {
+            t->type = CodecBuffer::Type::NATIVE_HANDLE;
+            t->nativeHandle = hidl_handle(l.mNativeHandle->handle());
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `CodecBuffer` to `OMXBuffer`.
+ *
+ * \param[out] l The destination `OMXBuffer`.
+ * \param[in] t The source `CodecBuffer`.
+ * \return `true` if successful; `false` otherwise.
+ */
+// convert: CodecBuffer -> OMXBuffer
+inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
+    switch (t.type) {
+        case CodecBuffer::Type::INVALID: {
+            *l = OMXBuffer();
+            return true;
+        }
+        case CodecBuffer::Type::PRESET: {
+            *l = OMXBuffer(
+                    t.attr.preset.rangeOffset,
+                    t.attr.preset.rangeLength);
+            return true;
+        }
+        case CodecBuffer::Type::SHARED_MEM: {
+            *l = OMXBuffer(t.sharedMemory);
+            return true;
+        }
+        case CodecBuffer::Type::ANW_BUFFER: {
+            *l = OMXBuffer(sp<GraphicBuffer>(new GraphicBuffer(
+                    t.attr.anwBuffer.width,
+                    t.attr.anwBuffer.height,
+                    static_cast<::android::PixelFormat>(
+                            t.attr.anwBuffer.format),
+                    t.attr.anwBuffer.layerCount,
+                    t.attr.anwBuffer.usage,
+                    t.attr.anwBuffer.stride,
+                    native_handle_clone(t.nativeHandle),
+                    true)));
+            return true;
+        }
+        case CodecBuffer::Type::NATIVE_HANDLE: {
+            *l = OMXBuffer(NativeHandle::create(
+                    native_handle_clone(t.nativeHandle), true));
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
+ *
+ * \param[out] t The destination `IOmx::ComponentInfo`.
+ * \param[in] l The source `IOMX::ComponentInfo`.
+ */
+// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
+inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
+    t->mName = l.mName.string();
+    t->mRoles.resize(l.mRoles.size());
+    size_t i = 0;
+    for (auto& role : l.mRoles) {
+        t->mRoles[i++] = role.string();
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
+ *
+ * \param[out] l The destination `IOMX::ComponentInfo`.
+ * \param[in] t The source `IOmx::ComponentInfo`.
+ */
+// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
+inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
+    l->mName = t.mName.c_str();
+    l->mRoles.clear();
+    for (size_t i = 0; i < t.mRoles.size(); ++i) {
+        l->mRoles.push_back(String8(t.mRoles[i].c_str()));
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `OMX_BOOL` to `bool`.
+ *
+ * \param[in] l The source `OMX_BOOL`.
+ * \return The destination `bool`.
+ */
+// convert: OMX_BOOL -> bool
+inline bool toRawBool(OMX_BOOL l) {
+    return l == OMX_FALSE ? false : true;
+}
+
+/**
+ * \brief Convert `bool` to `OMX_BOOL`.
+ *
+ * \param[in] t The source `bool`.
+ * \return The destination `OMX_BOOL`.
+ */
+// convert: bool -> OMX_BOOL
+inline OMX_BOOL toEnumBool(bool t) {
+    return t ? OMX_TRUE : OMX_FALSE;
+}
+
+/**
+ * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_COMMANDTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_COMMANDTYPE -> uint32_t
+inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_COMMANDTYPE
+inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
+    return static_cast<OMX_COMMANDTYPE>(t);
+}
+
+/**
+ * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_INDEXTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_INDEXTYPE -> uint32_t
+inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_INDEXTYPE`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_INDEXTYPE
+inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
+    return static_cast<OMX_INDEXTYPE>(t);
+}
+
+/**
+ * \brief Convert `IOMX::PortMode` to `PortMode`.
+ *
+ * \param[in] l The source `IOMX::PortMode`.
+ * \return The destination `PortMode`.
+ */
+// convert: IOMX::PortMode -> PortMode
+inline PortMode toHardwarePortMode(IOMX::PortMode l) {
+    return static_cast<PortMode>(l);
+}
+
+/**
+ * \brief Convert `PortMode` to `IOMX::PortMode`.
+ *
+ * \param[in] t The source `PortMode`.
+ * \return The destination `IOMX::PortMode`.
+ */
+// convert: PortMode -> IOMX::PortMode
+inline IOMX::PortMode toIOMXPortMode(PortMode t) {
+    return static_cast<IOMX::PortMode>(t);
+}
+
+/**
+ * \brief Convert `OMX_TICKS` to `uint64_t`.
+ *
+ * \param[in] l The source `OMX_TICKS`.
+ * \return The destination `uint64_t`.
+ */
+// convert: OMX_TICKS -> uint64_t
+inline uint64_t toRawTicks(OMX_TICKS l) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<uint64_t>(l);
+#else
+    return static_cast<uint64_t>(l.nLowPart) |
+            (static_cast<uint64_t>(l.nHighPart) << 32);
+#endif
+}
+
+/**
+ * \brief Convert `uint64_t` to `OMX_TICKS`.
+ *
+ * \param[in] t The source `uint64_t`.
+ * \return The destination `OMX_TICKS`.
+ */
+// convert: uint64_t -> OMX_TICKS
+inline OMX_TICKS toOMXTicks(uint64_t t) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<OMX_TICKS>(t);
+#else
+    return OMX_TICKS{
+            static_cast<uint32_t>(t & 0xFFFFFFFF),
+            static_cast<uint32_t>(t >> 32)};
+#endif
+}
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+    t->attr.width = l.getWidth();
+    t->attr.height = l.getHeight();
+    t->attr.stride = l.getStride();
+    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+    t->attr.layerCount = l.getLayerCount();
+    t->attr.usage = l.getUsage();
+    t->attr.id = l.getId();
+    t->attr.generationNumber = l.getGenerationNumber();
+    t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+    native_handle_t* handle = t.nativeHandle == nullptr ?
+            nullptr : native_handle_clone(t.nativeHandle);
+
+    size_t const numInts = 12 + (handle ? handle->numInts : 0);
+    int32_t* ints = new int32_t[numInts];
+
+    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+    int* fds = new int[numFds];
+
+    ints[0] = 'GBFR';
+    ints[1] = static_cast<int32_t>(t.attr.width);
+    ints[2] = static_cast<int32_t>(t.attr.height);
+    ints[3] = static_cast<int32_t>(t.attr.stride);
+    ints[4] = static_cast<int32_t>(t.attr.format);
+    ints[5] = static_cast<int32_t>(t.attr.layerCount);
+    ints[6] = static_cast<int32_t>(t.attr.usage);
+    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+    ints[10] = 0;
+    ints[11] = 0;
+    if (handle) {
+        ints[10] = static_cast<int32_t>(handle->numFds);
+        ints[11] = static_cast<int32_t>(handle->numInts);
+        int* intsStart = handle->data + handle->numFds;
+        std::copy(handle->data, intsStart, fds);
+        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+    }
+
+    void const* constBuffer = static_cast<void const*>(ints);
+    size_t size = numInts * sizeof(int32_t);
+    int const* constFds = static_cast<int const*>(fds);
+    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+    delete [] fds;
+    delete [] ints;
+    native_handle_delete(handle);
+    return status == NO_ERROR;
+}
+
+/**
+ * Conversion functions for types outside media
+ * ============================================
+ *
+ * Some objects in libui and libgui that were made to go through binder calls do
+ * not expose ways to read or write their fields to the public. To pass an
+ * object of this kind through the HIDL boundary, translation functions need to
+ * work around the access restriction by using the publicly available
+ * `flatten()` and `unflatten()` functions.
+ *
+ * All `flatten()` and `unflatten()` overloads follow the same convention as
+ * follows:
+ *
+ *     status_t flatten(ObjectType const& object,
+ *                      [OtherType const& other, ...]
+ *                      void*& buffer, size_t& size,
+ *                      int*& fds, size_t& numFds)
+ *
+ *     status_t unflatten(ObjectType* object,
+ *                        [OtherType* other, ...,]
+ *                        void*& buffer, size_t& size,
+ *                        int*& fds, size_t& numFds)
+ *
+ * The number of `other` parameters varies depending on the `ObjectType`. For
+ * example, in the process of unflattening an object that contains
+ * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
+ * be created.
+ *
+ * The last four parameters always work the same way in all overloads of
+ * `flatten()` and `unflatten()`:
+ * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
+ *   `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
+ *   `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
+ *   size (in ints) of the fd buffer pointed to by `fds`.
+ * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
+ *   from, `size` is the size (in bytes) of the non-fd buffer pointed to by
+ *   `buffer`, `fds` is the pointer to the fd buffer to be read from, and
+ *   `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
+ * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
+ *   will be advanced, while `size` and `numFds` will be decreased to reflect
+ *   how much storage/data of the two buffers (fd and non-fd) have been used.
+ * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
+ *   `numFds` are invalid.
+ *
+ * The return value of a successful `flatten()` or `unflatten()` call will be
+ * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
+ *
+ * For each object type that supports flattening, there will be two accompanying
+ * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
+ * return the size of the non-fd buffer that the object will need for
+ * flattening. `getFdCount()` will return the size of the fd buffer that the
+ * object will need for flattening.
+ *
+ * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
+ * `flatten()` and `unflatten()`, are similar to functions of the same name in
+ * the abstract class `Flattenable`. The only difference is that functions in
+ * this file are not member functions of the object type. For example, we write
+ *
+ *     flatten(x, buffer, size, fds, numFds)
+ *
+ * instead of
+ *
+ *     x.flatten(buffer, size, fds, numFds)
+ *
+ * because we cannot modify the type of `x`.
+ *
+ * There is one exception to the naming convention: `hidl_handle` that
+ * represents a fence. The four functions for this "Fence" type have the word
+ * "Fence" attched to their names because the object type, which is
+ * `hidl_handle`, does not carry the special meaning that the object itself can
+ * only contain zero or one file descriptor.
+ */
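+
+// Sketch of the calling pattern described above (illustrative only; `obj` is
+// a hypothetical flattenable value handled by these helpers):
+//
+//     size_t size = getFlattenedSize(obj);
+//     size_t numFds = getFdCount(obj);
+//     std::vector<uint8_t> baseBuffer(size);
+//     std::vector<int> baseFds(numFds);
+//     void* buffer = baseBuffer.data();
+//     int* fds = baseFds.data();
+//     if (flatten(obj, buffer, size, fds, numFds) == NO_ERROR) {
+//         // buffer/fds have been advanced; size/numFds hold what remains.
+//     }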
+
+// Ref: frameworks/native/libs/ui/Fence.cpp
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return The required size of the flat buffer.
+ *
+ * The current version of this function always returns 4, which is the number of
+ * bytes required to store the number of file descriptors contained in the fd
+ * part of the flat buffer.
+ */
+inline size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
+    return 4;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return `0` if \p fence does not contain a valid file descriptor, or `1`
+ * otherwise.
+ */
+inline size_t getFenceFdCount(hidl_handle const& fence) {
+    return native_handle_read_fd(fence) == -1 ? 0 : 1;
+}
+
+/**
+ * \brief Unflatten `Fence` to `hidl_handle`.
+ *
+ * \param[out] fence The destination `hidl_handle`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will point to a newly created
+ * native handle, which needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < 4) {
+        return NO_MEMORY;
+    }
+
+    uint32_t numFdsInHandle;
+    FlattenableUtils::read(buffer, size, numFdsInHandle);
+
+    if (numFdsInHandle > 1) {
+        return BAD_VALUE;
+    }
+
+    if (numFds < numFdsInHandle) {
+        return NO_MEMORY;
+    }
+
+    if (numFdsInHandle) {
+        *nh = native_handle_create_from_fd(*fds);
+        if (*nh == nullptr) {
+            return NO_MEMORY;
+        }
+        *fence = hidl_handle(*nh);
+        ++fds;
+        --numFds;
+    } else {
+        *nh = nullptr;
+        *fence = hidl_handle();
+    }
+
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `hidl_handle` as `Fence`.
+ *
+ * \param[in] fence The source `hidl_handle`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t flattenFence(hidl_handle const& fence,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFenceFlattenedSize(fence) ||
+            numFds < getFenceFdCount(fence)) {
+        return NO_MEMORY;
+    }
+    // Cast to uint32_t since the size of a size_t can vary between 32- and
+    // 64-bit processes
+    FlattenableUtils::write(buffer, size,
+            static_cast<uint32_t>(getFenceFdCount(fence)));
+    int fd = native_handle_read_fd(fence);
+    if (fd != -1) {
+        *fds = fd;
+        ++fds;
+        --numFds;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `Fence` in `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle pointed to by \p t.
+ * \param[in] l The source `Fence`.
+ *
+ * On success, \p nh will hold a newly created native handle, which must be
+ * deleted manually with `native_handle_delete()` afterwards.
+ */
+// wrap: Fence -> hidl_handle
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
+            != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `hidl_handle` to `Fence`.
+ *
+ * \param[out] l The destination `Fence`. `l` must not have been used
+ * (`l->isValid()` must return `false`) before this function is called.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * If \p t contains a valid file descriptor, it will be duplicated.
+ */
+// convert: hidl_handle -> Fence
+inline bool convertTo(Fence* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    native_handle_t* nh = native_handle_create_from_fd(fd);
+    if (nh == nullptr) {
+        if (fd != -1) {
+            close(fd);
+        }
+        return false;
+    }
+
+    size_t const baseSize = getFenceFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    size_t const baseNumFds = getFenceFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
+        native_handle_delete(nh);
+        return false;
+    }
+    native_handle_delete(nh);
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FenceTimeSnapshot const& t) {
+    constexpr size_t min = sizeof(t.state);
+    switch (t.state) {
+        case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            return min;
+        case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+            return min + getFenceFlattenedSize(t.fence);
+        case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            return min + sizeof(
+                    ::android::FenceTime::Snapshot::signalTime);
+    }
+    return 0;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FenceTimeSnapshot const& t) {
+    return t.state ==
+            IOmxBufferProducer::FenceTimeSnapshot::State::FENCE ?
+            getFenceFdCount(t.fence) : 0;
+}
+
+/**
+ * \brief Flatten `FenceTimeSnapshot`.
+ *
+ * \param[in] t The source `FenceTimeSnapshot`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence` if `t.state ==
+ * FENCE`.
+ */
+inline status_t flatten(IOmxBufferProducer::FenceTimeSnapshot const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    switch (t.state) {
+        case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::EMPTY);
+            return NO_ERROR;
+        case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::FENCE);
+            return flattenFence(t.fence, buffer, size, fds, numFds);
+        case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
+            FlattenableUtils::write(buffer, size, t.signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Unflatten `FenceTimeSnapshot`.
+ *
+ * \param[out] t The destination `FenceTimeSnapshot`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and the constructed snapshot contains a
+ * file descriptor, \p nh will be created to hold that file descriptor. In this
+ * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < sizeof(t->state)) {
+        return NO_MEMORY;
+    }
+
+    ::android::FenceTime::Snapshot::State state;
+    FlattenableUtils::read(buffer, size, state);
+    switch (state) {
+        case ::android::FenceTime::Snapshot::State::EMPTY:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY;
+            return NO_ERROR;
+        case ::android::FenceTime::Snapshot::State::FENCE:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::FENCE;
+            return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
+        case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
+            t->state = IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
+            if (size < sizeof(t->signalTimeNs)) {
+                return NO_MEMORY;
+            }
+            FlattenableUtils::read(buffer, size, t->signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
+
+/**
+ * \brief Return a lower bound on the size of the non-fd buffer required to
+ * flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+        IOmxBufferProducer::FrameEventsDelta const& /* t */) {
+    return sizeof(uint64_t) + // mFrameNumber
+            sizeof(uint8_t) + // mIndex
+            sizeof(uint8_t) + // mAddPostCompositeCalled
+            sizeof(uint8_t) + // mAddRetireCalled
+            sizeof(uint8_t) + // mAddReleaseCalled
+            sizeof(nsecs_t) + // mPostedTime
+            sizeof(nsecs_t) + // mRequestedPresentTime
+            sizeof(nsecs_t) + // mLatchTime
+            sizeof(nsecs_t) + // mFirstRefreshStartTime
+            sizeof(nsecs_t) + // mLastRefreshStartTime
+            sizeof(nsecs_t); // mDequeueReadyTime
+}
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FrameEventsDelta const& t) {
+    return minFlattenedSize(t) +
+            getFlattenedSize(t.gpuCompositionDoneFence) +
+            getFlattenedSize(t.displayPresentFence) +
+            getFlattenedSize(t.displayRetireFence) +
+            getFlattenedSize(t.releaseFence);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FrameEventsDelta const& t) {
+    return getFdCount(t.gpuCompositionDoneFence) +
+            getFdCount(t.displayPresentFence) +
+            getFdCount(t.displayRetireFence) +
+            getFdCount(t.releaseFence);
+}
+
+/**
+ * \brief Unflatten `FrameEventsDelta`.
+ *
+ * \param[out] t The destination `FrameEventsDelta`.
+ * \param[out] nh The underlying array of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
+ * populated with `nullptr` or newly created handles. Each non-null slot in \p
+ * nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(IOmxBufferProducer::FrameEventsDelta* t,
+        std::vector<native_handle_t*>* nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < minFlattenedSize(*t)) {
+        return NO_MEMORY;
+    }
+    FlattenableUtils::read(buffer, size, t->frameNumber);
+
+    // These were written as uint8_t for alignment.
+    uint8_t temp = 0;
+    FlattenableUtils::read(buffer, size, temp);
+    size_t index = static_cast<size_t>(temp);
+    if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    t->index = static_cast<uint32_t>(index);
+
+    FlattenableUtils::read(buffer, size, temp);
+    t->addPostCompositeCalled = static_cast<bool>(temp);
+    FlattenableUtils::read(buffer, size, temp);
+    t->addRetireCalled = static_cast<bool>(temp);
+    FlattenableUtils::read(buffer, size, temp);
+    t->addReleaseCalled = static_cast<bool>(temp);
+
+    FlattenableUtils::read(buffer, size, t->postedTimeNs);
+    FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
+    FlattenableUtils::read(buffer, size, t->latchTimeNs);
+    FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
+    FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
+    FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
+
+    // Fences
+    IOmxBufferProducer::FenceTimeSnapshot* tSnapshot[4];
+    tSnapshot[0] = &t->gpuCompositionDoneFence;
+    tSnapshot[1] = &t->displayPresentFence;
+    tSnapshot[2] = &t->displayRetireFence;
+    tSnapshot[3] = &t->releaseFence;
+    nh->resize(4);
+    for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+        status_t status = unflatten(
+                tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
+                buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            while (snapshotIndex > 0) {
+                --snapshotIndex;
+                if ((*nh)[snapshotIndex] != nullptr) {
+                    native_handle_delete((*nh)[snapshotIndex]);
+                }
+            }
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The source `FrameEventsDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp:
+//      FrameEventsDelta::flatten
+inline status_t flatten(IOmxBufferProducer::FrameEventsDelta const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    // Check that t.index is within a valid range.
+    if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
+            || t.index > std::numeric_limits<uint8_t>::max()) {
+        return BAD_VALUE;
+    }
+
+    FlattenableUtils::write(buffer, size, t.frameNumber);
+
+    // These are static_cast to uint8_t for alignment.
+    FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addRetireCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
+
+    FlattenableUtils::write(buffer, size, t.postedTimeNs);
+    FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
+    FlattenableUtils::write(buffer, size, t.latchTimeNs);
+    FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
+
+    // Fences
+    IOmxBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
+    tSnapshot[0] = &t.gpuCompositionDoneFence;
+    tSnapshot[1] = &t.displayPresentFence;
+    tSnapshot[2] = &t.displayRetireFence;
+    tSnapshot[3] = &t.releaseFence;
+    for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+        status_t status = flatten(
+                *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+    size_t size = 4;
+    for (size_t i = 0; i < t.size(); ++i) {
+        size += getFlattenedSize(t[i]);
+    }
+    return size;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+    size_t numFds = 0;
+    for (size_t i = 0; i < t.size(); ++i) {
+        numFds += getFdCount(t[i]);
+    }
+    return numFds;
+}
+
+/**
+ * \brief Unflatten `FrameEventHistoryDelta`.
+ *
+ * \param[out] t The destination `FrameEventHistoryDelta`.
+ * \param[out] nh The underlying array of arrays of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
+ * newly created handles. The second dimension of \p nh will be 4. Each non-null
+ * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::FrameEventHistoryDelta* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < 4) {
+        return NO_MEMORY;
+    }
+
+    uint32_t deltaCount = 0;
+    FlattenableUtils::read(buffer, size, deltaCount);
+    if (static_cast<size_t>(deltaCount) >
+            ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    t->resize(deltaCount);
+    nh->resize(deltaCount);
+    for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
+        status_t status = unflatten(
+                &((*t)[deltaIndex]), &((*nh)[deltaIndex]),
+                buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventHistoryDelta`.
+ *
+ * \param[in] t The source `FrameEventHistoryDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+inline status_t flatten(
+        IOmxBufferProducer::FrameEventHistoryDelta const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (t.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+    for (size_t deltaIndex = 0; deltaIndex < t.size(); ++deltaIndex) {
+        status_t status = flatten(t[deltaIndex], buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `::android::FrameEventHistoryData` in
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `::android::FrameEventHistoryDelta`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+inline bool wrapAs(IOmxBufferProducer::FrameEventHistoryDelta* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        ::android::FrameEventHistoryDelta const& l) {
+
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
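+// Illustrative usage (hypothetical variable names): a caller that wraps a
+// frame-event history delta owns every non-null handle returned in `nh` and
+// must delete them once the wrapped value has been handed off, as the
+// producer wrapper in WOmxBufferProducer.cpp does:
+//
+//     IOmxBufferProducer::FrameEventHistoryDelta tDelta;
+//     std::vector<std::vector<native_handle_t*> > nhAA;
+//     if (wrapAs(&tDelta, &nhAA, lDelta)) {
+//         // ... pass tDelta to the HIDL callback ...
+//         for (auto& nhA : nhAA) {
+//             for (auto& nh : nhA) {
+//                 if (nh != nullptr) {
+//                     native_handle_delete(nh);
+//                 }
+//             }
+//         }
+//     }
+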
+/**
+ * \brief Convert `IOmxBufferProducer::FrameEventHistoryDelta` to
+ * `::android::FrameEventHistoryDelta`.
+ *
+ * \param[out] l The destination `::android::FrameEventHistoryDelta`.
+ * \param[in] t The source `IOmxBufferProducer::FrameEventHistoryDelta`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+inline bool convertTo(
+        ::android::FrameEventHistoryDelta* l,
+        IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = getFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/ui/Region.cpp
+
+/**
+ * \brief Return the size of the buffer required to flatten `Region`.
+ *
+ * \param[in] t The input `Region`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(Region const& t) {
+    return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
+}
+
+/**
+ * \brief Unflatten `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t unflatten(Region* t, void const*& buffer, size_t& size) {
+    if (size < sizeof(uint32_t)) {
+        return NO_MEMORY;
+    }
+
+    uint32_t numRects = 0;
+    FlattenableUtils::read(buffer, size, numRects);
+    if (numRects > (UINT32_MAX / sizeof(Rect))) {
+        return NO_MEMORY;
+    }
+    if (size < numRects * sizeof(Rect)) {
+        return NO_MEMORY;
+    }
+
+    t->resize(numRects);
+    for (size_t r = 0; r < numRects; ++r) {
+        ::android::Rect rect(::android::Rect::EMPTY_RECT);
+        status_t status = rect.unflatten(buffer, size);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        FlattenableUtils::advance(buffer, size, sizeof(rect));
+        (*t)[r] = Rect{
+                static_cast<int32_t>(rect.left),
+                static_cast<int32_t>(rect.top),
+                static_cast<int32_t>(rect.right),
+                static_cast<int32_t>(rect.bottom)};
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `Region`.
+ *
+ * \param[in] t The source `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t flatten(Region const& t, void*& buffer, size_t& size) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+    for (size_t r = 0; r < t.size(); ++r) {
+        ::android::Rect rect(
+                static_cast<int32_t>(t[r].left),
+                static_cast<int32_t>(t[r].top),
+                static_cast<int32_t>(t[r].right),
+                static_cast<int32_t>(t[r].bottom));
+        status_t status = rect.flatten(buffer, size);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        FlattenableUtils::advance(buffer, size, sizeof(rect));
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Convert `::android::Region` to `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in] l The source `::android::Region`.
+ */
+// convert: ::android::Region -> Region
+inline bool convertTo(Region* t, ::android::Region const& l) {
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    if (l.flatten(buffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    if (unflatten(t, constBuffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `Region` to `::android::Region`.
+ *
+ * \param[out] l The destination `::android::Region`.
+ * \param[in] t The source `Region`.
+ */
+// convert: Region -> ::android::Region
+inline bool convertTo(::android::Region* l, Region const& t) {
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    if (flatten(t, buffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    if (l->unflatten(constBuffer, size) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
+//      IGraphicBufferProducer::QueueBufferInput
+
+/**
+ * \brief Return a lower bound on the size of the buffer required to flatten
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+        IOmxBufferProducer::QueueBufferInput const& /* t */) {
+    return sizeof(int64_t) + // timestamp
+            sizeof(int) + // isAutoTimestamp
+            sizeof(android_dataspace) + // dataSpace
+            sizeof(::android::Rect) + // crop
+            sizeof(int) + // scalingMode
+            sizeof(uint32_t) + // transform
+            sizeof(uint32_t) + // stickyTransform
+            sizeof(bool); // getFrameTimestamps
+}
+
+/**
+ * \brief Return the size of the buffer required to flatten
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(IOmxBufferProducer::QueueBufferInput const& t) {
+    return minFlattenedSize(t) +
+            getFenceFlattenedSize(t.fence) +
+            getFlattenedSize(t.surfaceDamage);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+        IOmxBufferProducer::QueueBufferInput const& t) {
+    return getFenceFdCount(t.fence);
+}
+
+/**
+ * \brief Flatten `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence`. */
+inline status_t flatten(IOmxBufferProducer::QueueBufferInput const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::write(buffer, size, t.timestamp);
+    FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
+    FlattenableUtils::write(buffer, size,
+            static_cast<android_dataspace_t>(t.dataSpace));
+    FlattenableUtils::write(buffer, size, ::android::Rect(
+            static_cast<int32_t>(t.crop.left),
+            static_cast<int32_t>(t.crop.top),
+            static_cast<int32_t>(t.crop.right),
+            static_cast<int32_t>(t.crop.bottom)));
+    FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
+    FlattenableUtils::write(buffer, size, t.transform);
+    FlattenableUtils::write(buffer, size, t.stickyTransform);
+    FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
+
+    status_t status = flattenFence(t.fence, buffer, size, fds, numFds);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return flatten(t.surfaceDamage, buffer, size);
+}
+
+/**
+ * \brief Unflatten `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The destination `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline status_t unflatten(
+        IOmxBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < minFlattenedSize(*t)) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::read(buffer, size, t->timestamp);
+    int lIsAutoTimestamp;
+    FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
+    t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
+    android_dataspace_t lDataSpace;
+    FlattenableUtils::read(buffer, size, lDataSpace);
+    t->dataSpace = static_cast<Dataspace>(lDataSpace);
+    Rect lCrop;
+    FlattenableUtils::read(buffer, size, lCrop);
+    t->crop = Rect{
+            static_cast<int32_t>(lCrop.left),
+            static_cast<int32_t>(lCrop.top),
+            static_cast<int32_t>(lCrop.right),
+            static_cast<int32_t>(lCrop.bottom)};
+    int lScalingMode;
+    FlattenableUtils::read(buffer, size, lScalingMode);
+    t->scalingMode = static_cast<int32_t>(lScalingMode);
+    FlattenableUtils::read(buffer, size, t->transform);
+    FlattenableUtils::read(buffer, size, t->stickyTransform);
+    FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
+
+    status_t status = unflattenFence(&(t->fence), nh,
+            buffer, size, fds, numFds);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return unflatten(&(t->surfaceDamage), buffer, size);
+}
+
+/**
+ * \brief Wrap `IGraphicBufferProducer::QueueBufferInput` in
+ * `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in] l The source `IGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If the return value is `true` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline bool wrapAs(
+        IOmxBufferProducer::QueueBufferInput* t,
+        native_handle_t** nh,
+        IGraphicBufferProducer::QueueBufferInput const& l) {
+
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
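+// Illustrative usage (hypothetical variable names): the producer wrapper
+// wraps a gui QueueBufferInput before the HIDL call and deletes the native
+// handle created for the fence once the call returns, e.g.
+//
+//     IOmxBufferProducer::QueueBufferInput tInput;
+//     native_handle_t* nh = nullptr;
+//     if (wrapAs(&tInput, &nh, lInput)) {
+//         // ... invoke queueBuffer(slot, tInput, ...) ...
+//         if (nh != nullptr) {
+//             native_handle_delete(nh);
+//         }
+//     }
+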
+/**
+ * \brief Convert `IOmxBufferProducer::QueueBufferInput` to
+ * `IGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] l The destination `IGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ *
+ * If `t.fence` has a valid file descriptor, it will be duplicated.
+ */
+inline bool convertTo(
+        IGraphicBufferProducer::QueueBufferInput* l,
+        IOmxBufferProducer::QueueBufferInput const& t) {
+
+    size_t const baseSize = getFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = getFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = baseFds.get();
+    size_t numFds = baseNumFds;
+    if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
+//      IGraphicBufferProducer::QueueBufferOutput
+
+/**
+ * \brief Wrap `IGraphicBufferProducer::QueueBufferOutput` in
+ * `IOmxBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] t The wrapper of type
+ * `IOmxBufferProducer::QueueBufferOutput`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `IGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+// wrap: IGraphicBufferProducer::QueueBufferOutput ->
+// IOmxBufferProducer::QueueBufferOutput
+inline bool wrapAs(IOmxBufferProducer::QueueBufferOutput* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        IGraphicBufferProducer::QueueBufferOutput const& l) {
+    if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
+        return false;
+    }
+    t->width = l.width;
+    t->height = l.height;
+    t->transformHint = l.transformHint;
+    t->numPendingBuffers = l.numPendingBuffers;
+    t->nextFrameNumber = l.nextFrameNumber;
+    return true;
+}
+
+/**
+ * \brief Convert `IOmxBufferProducer::QueueBufferOutput` to
+ * `IGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] l The destination `IGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `IOmxBufferProducer::QueueBufferOutput`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+// convert: IOmxBufferProducer::QueueBufferOutput ->
+// IGraphicBufferProducer::QueueBufferOutput
+inline bool convertTo(
+        IGraphicBufferProducer::QueueBufferOutput* l,
+        IOmxBufferProducer::QueueBufferOutput const& t) {
+    if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
+        return false;
+    }
+    l->width = t.width;
+    l->height = t.height;
+    l->transformHint = t.transformHint;
+    l->numPendingBuffers = t.numPendingBuffers;
+    l->nextFrameNumber = t.nextFrameNumber;
+    return true;
+}
+
+/**
+ * \brief Convert `IGraphicBufferProducer::DisconnectMode` to
+ * `IOmxBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `IGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `IOmxBufferProducer::DisconnectMode`.
+ */
+inline IOmxBufferProducer::DisconnectMode toOmxDisconnectMode(
+        IGraphicBufferProducer::DisconnectMode l) {
+    switch (l) {
+        case IGraphicBufferProducer::DisconnectMode::Api:
+            return IOmxBufferProducer::DisconnectMode::API;
+        case IGraphicBufferProducer::DisconnectMode::AllLocal:
+            return IOmxBufferProducer::DisconnectMode::ALL_LOCAL;
+    }
+    return IOmxBufferProducer::DisconnectMode::API;
+}
+
+/**
+ * \brief Convert `IOmxBufferProducer::DisconnectMode` to
+ * `IGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `IOmxBufferProducer::DisconnectMode`.
+ * \return The corresponding `IGraphicBufferProducer::DisconnectMode`.
+ */
+inline IGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+        IOmxBufferProducer::DisconnectMode t) {
+    switch (t) {
+        case IOmxBufferProducer::DisconnectMode::API:
+            return IGraphicBufferProducer::DisconnectMode::Api;
+        case IOmxBufferProducer::DisconnectMode::ALL_LOCAL:
+            return IGraphicBufferProducer::DisconnectMode::AllLocal;
+    }
+    return IGraphicBufferProducer::DisconnectMode::Api;
+}
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
similarity index 85%
copy from media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
copy to media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
index 0ec31f2..037e9b2 100644
--- a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WGraphicBufferSource.h"
 #include "Conversion.h"
 #include "WOmxNode.h"
@@ -8,7 +24,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using android::ColorUtils;
 
@@ -116,7 +132,7 @@
     return toHardwareStatus(mBase->signalEndOfInputStream());
 }
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
similarity index 84%
copy from media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
copy to media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
index 66977ad..17a4486 100644
--- a/media/libstagefright/omx/hal/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WGraphicBufferSource.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
 
@@ -17,7 +33,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::graphics::common::V1_0::Dataspace;
 using ::android::hardware::media::omx::V1_0::ColorAspects;
@@ -82,7 +98,7 @@
     Return<void> signalEndOfInputStream() override;
 };
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp
new file mode 100644
index 0000000..07c9255
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmx.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmx.h"
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "WOmxBufferProducer.h"
+#include "WGraphicBufferSource.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// LWOmx
+LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
+}
+
+status_t LWOmx::listNodes(List<IOMX::ComponentInfo>* list) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->listNodes(
+            [&fnStatus, list](
+                    Status status,
+                    hidl_vec<IOmx::ComponentInfo> const& nodeList) {
+                fnStatus = toStatusT(status);
+                list->clear();
+                for (size_t i = 0; i < nodeList.size(); ++i) {
+                    auto newInfo = list->insert(
+                            list->end(), IOMX::ComponentInfo());
+                    convertTo(&*newInfo, nodeList[i]);
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::allocateNode(
+        char const* name,
+        sp<IOMXObserver> const& observer,
+        sp<IOMXNode>* omxNode) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->allocateNode(
+            name, new TWOmxObserver(observer),
+            [&fnStatus, omxNode](Status status, sp<IOmxNode> const& node) {
+                fnStatus = toStatusT(status);
+                *omxNode = new LWOmxNode(node);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::createInputSurface(
+        sp<::android::IGraphicBufferProducer>* bufferProducer,
+        sp<::android::IGraphicBufferSource>* bufferSource) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->createInputSurface(
+            [&fnStatus, bufferProducer, bufferSource] (
+                    Status status,
+                    sp<IOmxBufferProducer> const& tProducer,
+                    sp<IGraphicBufferSource> const& tSource) {
+                fnStatus = toStatusT(status);
+                *bufferProducer = new LWOmxBufferProducer(tProducer);
+                *bufferSource = new LWGraphicBufferSource(tSource);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+::android::IBinder* LWOmx::onAsBinder() {
+    return nullptr;
+}
+
+// TWOmx
+TWOmx::TWOmx(sp<IOMX> const& base) : mBase(base) {
+}
+
+Return<void> TWOmx::listNodes(listNodes_cb _hidl_cb) {
+    List<IOMX::ComponentInfo> lList;
+    Status status = toStatus(mBase->listNodes(&lList));
+
+    hidl_vec<IOmx::ComponentInfo> tList;
+    tList.resize(lList.size());
+    size_t i = 0;
+    for (auto const& lInfo : lList) {
+        convertTo(&(tList[i++]), lInfo);
+    }
+    _hidl_cb(status, tList);
+    return Void();
+}
+
+Return<void> TWOmx::allocateNode(
+        const hidl_string& name,
+        const sp<IOmxObserver>& observer,
+        allocateNode_cb _hidl_cb) {
+    sp<IOMXNode> omxNode;
+    Status status = toStatus(mBase->allocateNode(
+            name, new LWOmxObserver(observer), &omxNode));
+    _hidl_cb(status, new TWOmxNode(omxNode));
+    return Void();
+}
+
+Return<void> TWOmx::createInputSurface(createInputSurface_cb _hidl_cb) {
+    sp<::android::IGraphicBufferProducer> lProducer;
+    sp<::android::IGraphicBufferSource> lSource;
+    status_t status = mBase->createInputSurface(&lProducer, &lSource);
+    _hidl_cb(toStatus(status),
+             new TWOmxBufferProducer(lProducer),
+             new TWGraphicBufferSource(lSource));
+    return Void();
+}
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/WOmx.h b/media/libstagefright/omx/hal/1.0/utils/WOmx.h
similarity index 74%
copy from media/libstagefright/omx/hal/1.0/WOmx.h
copy to media/libstagefright/omx/hal/1.0/utils/WOmx.h
index b07c4f2..26affad 100644
--- a/media/libstagefright/omx/hal/1.0/WOmx.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmx.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
 
@@ -12,7 +28,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::media::omx::V1_0::IOmx;
 using ::android::hardware::media::omx::V1_0::IOmxNode;
@@ -62,10 +78,11 @@
             const hidl_string& name,
             const sp<IOmxObserver>& observer,
             allocateNode_cb _hidl_cb) override;
+    Return<void> createInputSurface(createInputSurface_cb _hidl_cb) override;
 
 };
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp
new file mode 100644
index 0000000..49f2706
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmxBufferProducer.h"
+#include "WOmxProducerListener.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// TWOmxBufferProducer
+TWOmxBufferProducer::TWOmxBufferProducer(
+        sp<IGraphicBufferProducer> const& base):
+    mBase(base) {
+}
+
+Return<void> TWOmxBufferProducer::requestBuffer(
+        int32_t slot, requestBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> buf;
+    status_t status = mBase->requestBuffer(slot, &buf);
+    AnwBuffer anwBuffer;
+    wrapAs(&anwBuffer, *buf);
+    _hidl_cb(toStatus(status), anwBuffer);
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::setMaxDequeuedBufferCount(
+        int32_t maxDequeuedBuffers) {
+    return toStatus(mBase->setMaxDequeuedBufferCount(
+            static_cast<int>(maxDequeuedBuffers)));
+}
+
+Return<Status> TWOmxBufferProducer::setAsyncMode(bool async) {
+    return toStatus(mBase->setAsyncMode(async));
+}
+
+Return<void> TWOmxBufferProducer::dequeueBuffer(
+        uint32_t width, uint32_t height,
+        PixelFormat format, uint32_t usage,
+        bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
+    int slot;
+    sp<Fence> fence;
+    ::android::FrameEventHistoryDelta outTimestamps;
+    status_t status = mBase->dequeueBuffer(
+            &slot, &fence,
+            width, height,
+            static_cast<::android::PixelFormat>(format), usage,
+            getFrameTimestamps ? &outTimestamps : nullptr);
+
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *fence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::dequeueBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+    FrameEventHistoryDelta tOutTimestamps;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
+        native_handle_delete(nh);
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::dequeueBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+
+    _hidl_cb(toStatus(status),
+            static_cast<int32_t>(slot),
+            tFence,
+            tOutTimestamps);
+    native_handle_delete(nh);
+    if (getFrameTimestamps) {
+        for (auto& nhA : nhAA) {
+            for (auto& handle : nhA) {
+                if (handle != nullptr) {
+                    native_handle_delete(handle);
+                }
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::detachBuffer(int32_t slot) {
+    return toStatus(mBase->detachBuffer(slot));
+}
+
+Return<void> TWOmxBufferProducer::detachNextBuffer(
+        detachNextBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> outBuffer;
+    sp<Fence> outFence;
+    status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
+
+    AnwBuffer tBuffer;
+    wrapAs(&tBuffer, *outBuffer);
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *outFence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::detachNextBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+
+    _hidl_cb(toStatus(status), tBuffer, tFence);
+    native_handle_delete(nh);
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::attachBuffer(
+        const AnwBuffer& buffer,
+        attachBuffer_cb _hidl_cb) {
+    int outSlot;
+    sp<GraphicBuffer> lBuffer = new GraphicBuffer();
+    if (!convertTo(lBuffer.get(), buffer)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::attachBuffer(): "
+                "Cannot convert AnwBuffer to GraphicBuffer"));
+    }
+    status_t status = mBase->attachBuffer(&outSlot, lBuffer);
+
+    _hidl_cb(toStatus(status), static_cast<int32_t>(outSlot));
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::queueBuffer(
+        int32_t slot, const QueueBufferInput& input,
+        queueBuffer_cb _hidl_cb) {
+    IGraphicBufferProducer::QueueBufferInput lInput(
+            0, false, HAL_DATASPACE_UNKNOWN,
+            ::android::Rect(0, 0, 1, 1),
+            NATIVE_WINDOW_SCALING_MODE_FREEZE,
+            0, ::android::Fence::NO_FENCE);
+    if (!convertTo(&lInput, input)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::queueBuffer(): "
+                "Cannot convert IOmxBufferProducer::QueueBufferInput "
+                "to IGraphicBufferProducer::QueueBufferInput"));
+    }
+    IGraphicBufferProducer::QueueBufferOutput lOutput;
+    status_t status = mBase->queueBuffer(
+            static_cast<int>(slot), lInput, &lOutput);
+
+    QueueBufferOutput tOutput;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::queueBuffer(): "
+                "Cannot wrap IGraphicBufferProducer::QueueBufferOutput "
+                "in IOmxBufferProducer::QueueBufferOutput"));
+    }
+
+    _hidl_cb(toStatus(status), tOutput);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::cancelBuffer(
+        int32_t slot, const hidl_handle& fence) {
+    sp<Fence> lFence = new Fence();
+    if (!convertTo(lFence.get(), fence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::cancelBuffer(): "
+                "Cannot convert hidl_handle to Fence"));
+    }
+    return toStatus(mBase->cancelBuffer(static_cast<int>(slot), lFence));
+}
+
+Return<void> TWOmxBufferProducer::query(int32_t what, query_cb _hidl_cb) {
+    int lValue;
+    int lReturn = mBase->query(static_cast<int>(what), &lValue);
+    _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::connect(
+        const sp<IOmxProducerListener>& listener,
+        int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
+    sp<IProducerListener> lListener = new LWOmxProducerListener(listener);
+    IGraphicBufferProducer::QueueBufferOutput lOutput;
+    status_t status = mBase->connect(lListener,
+            static_cast<int>(api),
+            producerControlledByApp,
+            &lOutput);
+
+    QueueBufferOutput tOutput;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::connect(): "
+                "Cannot wrap IGraphicBufferProducer::QueueBufferOutput "
+                "in IOmxBufferProducer::QueueBufferOutput"));
+    }
+
+    _hidl_cb(toStatus(status), tOutput);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::disconnect(
+        int32_t api, DisconnectMode mode) {
+    return toStatus(mBase->disconnect(
+            static_cast<int>(api),
+            toGuiDisconnectMode(mode)));
+}
+
+Return<Status> TWOmxBufferProducer::setSidebandStream(const hidl_handle& stream) {
+    return toStatus(mBase->setSidebandStream(NativeHandle::create(
+            native_handle_clone(stream), true)));
+}
+
+Return<void> TWOmxBufferProducer::allocateBuffers(
+        uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
+    mBase->allocateBuffers(
+            width, height,
+            static_cast<::android::PixelFormat>(format),
+            usage);
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::allowAllocation(bool allow) {
+    return toStatus(mBase->allowAllocation(allow));
+}
+
+Return<Status> TWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+    return toStatus(mBase->setGenerationNumber(generationNumber));
+}
+
+Return<void> TWOmxBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
+    _hidl_cb(mBase->getConsumerName().string());
+    return Void();
+}
+
+Return<Status> TWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+    return toStatus(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+Return<Status> TWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
+    return toStatus(mBase->setAutoRefresh(autoRefresh));
+}
+
+Return<Status> TWOmxBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
+    return toStatus(mBase->setDequeueTimeout(timeoutNs));
+}
+
+Return<void> TWOmxBufferProducer::getLastQueuedBuffer(
+        getLastQueuedBuffer_cb _hidl_cb) {
+    sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
+    sp<Fence> lOutFence = new Fence();
+    float lOutTransformMatrix[16];
+    status_t status = mBase->getLastQueuedBuffer(
+            &lOutBuffer, &lOutFence, lOutTransformMatrix);
+
+    AnwBuffer tOutBuffer;
+    wrapAs(&tOutBuffer, *lOutBuffer);
+    hidl_handle tOutFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tOutFence, &nh, *lOutFence)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::getLastQueuedBuffer(): "
+                "Cannot wrap Fence in hidl_handle"));
+    }
+    hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
+
+    _hidl_cb(toStatus(status), tOutBuffer, tOutFence, tOutTransformMatrix);
+    native_handle_delete(nh);
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::getFrameTimestamps(
+        getFrameTimestamps_cb _hidl_cb) {
+    ::android::FrameEventHistoryDelta lDelta;
+    mBase->getFrameTimestamps(&lDelta);
+
+    FrameEventHistoryDelta tDelta;
+    std::vector<std::vector<native_handle_t*> > nhAA;
+    if (!wrapAs(&tDelta, &nhAA, lDelta)) {
+        return ::android::hardware::Status::fromExceptionCode(
+                ::android::hardware::Status::EX_BAD_PARCELABLE,
+                String8("TWOmxBufferProducer::getFrameTimestamps(): "
+                "Cannot wrap ::android::FrameEventHistoryDelta "
+                "in FrameEventHistoryDelta"));
+    }
+
+    _hidl_cb(tDelta);
+    for (auto& nhA : nhAA) {
+        for (auto& nh : nhA) {
+            if (nh != nullptr) {
+                native_handle_delete(nh);
+            }
+        }
+    }
+    return Void();
+}
+
+Return<void> TWOmxBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
+    uint64_t outId;
+    status_t status = mBase->getUniqueId(&outId);
+    _hidl_cb(toStatus(status), outId);
+    return Void();
+}
+
+// LWOmxBufferProducer
+
+LWOmxBufferProducer::LWOmxBufferProducer(sp<IOmxBufferProducer> const& base) :
+    mBase(base) {
+}
+
+status_t LWOmxBufferProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
+    *buf = new GraphicBuffer();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->requestBuffer(
+            static_cast<int32_t>(slot),
+            [&fnStatus, &buf] (Status status, AnwBuffer const& buffer) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(buf->get(), buffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::setMaxDequeuedBufferCount(
+        int maxDequeuedBuffers) {
+    return toStatusT(mBase->setMaxDequeuedBufferCount(
+            static_cast<int32_t>(maxDequeuedBuffers)));
+}
+
+status_t LWOmxBufferProducer::setAsyncMode(bool async) {
+    return toStatusT(mBase->setAsyncMode(async));
+}
+
+status_t LWOmxBufferProducer::dequeueBuffer(
+        int* slot, sp<Fence>* fence,
+        uint32_t w, uint32_t h, ::android::PixelFormat format,
+        uint32_t usage, FrameEventHistoryDelta* outTimestamps) {
+    *fence = new Fence();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->dequeueBuffer(
+            w, h, static_cast<PixelFormat>(format), usage,
+            outTimestamps != nullptr,
+            [&fnStatus, slot, fence, outTimestamps] (
+                    Status status,
+                    int32_t tSlot,
+                    hidl_handle const& tFence,
+                    IOmxBufferProducer::FrameEventHistoryDelta const& tTs) {
+                fnStatus = toStatusT(status);
+                *slot = tSlot;
+                if (!convertTo(fence->get(), tFence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                if (outTimestamps && !convertTo(outTimestamps, tTs)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::detachBuffer(int slot) {
+    return toStatusT(mBase->detachBuffer(static_cast<int32_t>(slot)));
+}
+
+status_t LWOmxBufferProducer::detachNextBuffer(
+        sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence) {
+    *outBuffer = new GraphicBuffer();
+    *outFence = new Fence();
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->detachNextBuffer(
+            [&fnStatus, outBuffer, outFence] (
+                    Status status,
+                    AnwBuffer const& tBuffer,
+                    hidl_handle const& tFence) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(outFence->get(), tFence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                if (!convertTo(outBuffer->get(), tBuffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::attachBuffer(
+        int* outSlot, const sp<GraphicBuffer>& buffer) {
+    AnwBuffer tBuffer;
+    wrapAs(&tBuffer, *buffer);
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->attachBuffer(tBuffer,
+            [&fnStatus, outSlot] (Status status, int32_t slot) {
+                fnStatus = toStatusT(status);
+                *outSlot = slot;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::queueBuffer(
+        int slot,
+        const QueueBufferInput& input,
+        QueueBufferOutput* output) {
+    IOmxBufferProducer::QueueBufferInput tInput;
+    native_handle_t* nh;
+    if (!wrapAs(&tInput, &nh, input)) {
+        return BAD_VALUE;
+    }
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->queueBuffer(slot, tInput,
+            [&fnStatus, output] (
+                    Status status,
+                    IOmxBufferProducer::QueueBufferOutput const& tOutput) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(output, tOutput)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    native_handle_delete(nh);
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
+    hidl_handle tFence;
+    native_handle_t* nh;
+    if (!wrapAs(&tFence, &nh, *fence)) {
+        return BAD_VALUE;
+    }
+
+    status_t status = toStatusT(mBase->cancelBuffer(
+            static_cast<int32_t>(slot), tFence));
+    native_handle_delete(nh);
+    return status;
+}
+
+int LWOmxBufferProducer::query(int what, int* value) {
+    int result;
+    status_t transStatus = toStatusT(mBase->query(
+            static_cast<int32_t>(what),
+            [&result, value] (int32_t tResult, int32_t tValue) {
+                result = static_cast<int>(tResult);
+                *value = static_cast<int>(tValue);
+            }));
+    return transStatus == NO_ERROR ? result : static_cast<int>(transStatus);
+}
+
+status_t LWOmxBufferProducer::connect(
+        const sp<IProducerListener>& listener, int api,
+        bool producerControlledByApp, QueueBufferOutput* output) {
+    sp<IOmxProducerListener> tListener = new TWOmxProducerListener(listener);
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->connect(
+            tListener, static_cast<int32_t>(api), producerControlledByApp,
+            [&fnStatus, output] (
+                    Status status,
+                    IOmxBufferProducer::QueueBufferOutput const& tOutput) {
+                fnStatus = toStatusT(status);
+                if (!convertTo(output, tOutput)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxBufferProducer::disconnect(int api, DisconnectMode mode) {
+    return toStatusT(mBase->disconnect(
+            static_cast<int32_t>(api), toOmxDisconnectMode(mode)));
+}
+
+status_t LWOmxBufferProducer::setSidebandStream(
+        const sp<NativeHandle>& stream) {
+    return toStatusT(mBase->setSidebandStream(stream->handle()));
+}
+
+void LWOmxBufferProducer::allocateBuffers(uint32_t width, uint32_t height,
+        ::android::PixelFormat format, uint32_t usage) {
+    mBase->allocateBuffers(
+            width, height, static_cast<PixelFormat>(format), usage);
+}
+
+status_t LWOmxBufferProducer::allowAllocation(bool allow) {
+    return toStatusT(mBase->allowAllocation(allow));
+}
+
+status_t LWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+    return toStatusT(mBase->setGenerationNumber(generationNumber));
+}
+
+String8 LWOmxBufferProducer::getConsumerName() const {
+    String8 lName;
+    mBase->getConsumerName([&lName] (hidl_string const& name) {
+                lName = name.c_str();
+            });
+    return lName;
+}
+
+status_t LWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+    return toStatusT(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+status_t LWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
+    return toStatusT(mBase->setAutoRefresh(autoRefresh));
+}
+
+status_t LWOmxBufferProducer::setDequeueTimeout(nsecs_t timeout) {
+    return toStatusT(mBase->setDequeueTimeout(static_cast<int64_t>(timeout)));
+}
+
+status_t LWOmxBufferProducer::getLastQueuedBuffer(
+        sp<GraphicBuffer>* outBuffer,
+        sp<Fence>* outFence,
+        float outTransformMatrix[16]) {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getLastQueuedBuffer(
+            [&fnStatus, outBuffer, outFence, &outTransformMatrix] (
+                    Status status,
+                    AnwBuffer const& buffer,
+                    hidl_handle const& fence,
+                    hidl_array<float, 16> const& transformMatrix) {
+                fnStatus = toStatusT(status);
+                *outBuffer = new GraphicBuffer();
+                if (!convertTo(outBuffer->get(), buffer)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                *outFence = new Fence();
+                if (!convertTo(outFence->get(), fence)) {
+                    fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
+                }
+                std::copy(transformMatrix.data(),
+                        transformMatrix.data() + 16,
+                        outTransformMatrix);
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+void LWOmxBufferProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
+    mBase->getFrameTimestamps([outDelta] (
+            IOmxBufferProducer::FrameEventHistoryDelta const& tDelta) {
+                convertTo(outDelta, tDelta);
+            });
+}
+
+status_t LWOmxBufferProducer::getUniqueId(uint64_t* outId) const {
+    status_t fnStatus;
+    status_t transStatus = toStatusT(mBase->getUniqueId(
+            [&fnStatus, outId] (Status status, uint64_t id) {
+                fnStatus = toStatusT(status);
+                *outId = id;
+            }));
+    return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+::android::IBinder* LWOmxBufferProducer::onAsBinder() {
+    return nullptr;
+}
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
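
Note on the error-handling convention used throughout the LWOmxBufferProducer wrappers above: each call keeps the status of the HIDL transaction itself (transStatus) separate from the status reported by the remote function through its callback (fnStatus), and the transaction status wins whenever the call never reached the other side. The sketch below only illustrates that shape; callHidl() is a hypothetical stand-in for any mBase->method(...) invocation and is not part of this patch.

    status_t exampleWrapperCall() {
        status_t fnStatus = NO_ERROR;               // set by the callback below
        status_t transStatus = toStatusT(callHidl(  // hypothetical HIDL call
                [&fnStatus](Status status, int32_t /*value*/) {
                    // Runs only if the transaction itself succeeded.
                    fnStatus = toStatusT(status);
                }));
        // If the transaction failed, fnStatus was never delivered; report the
        // transport error instead.
        return transStatus == NO_ERROR ? fnStatus : transStatus;
    }
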
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h
new file mode 100644
index 0000000..46abd27
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferProducer.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
+
+#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
+#include <binder/Binder.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IProducerListener.h>
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
+using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IGraphicBufferProducer;
+using ::android::IProducerListener;
+
+struct TWOmxBufferProducer : public IOmxBufferProducer {
+    sp<IGraphicBufferProducer> mBase;
+    TWOmxBufferProducer(sp<IGraphicBufferProducer> const& base);
+    Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
+            override;
+    Return<Status> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
+            override;
+    Return<Status> setAsyncMode(bool async) override;
+    Return<void> dequeueBuffer(
+            uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
+            bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
+    Return<Status> detachBuffer(int32_t slot) override;
+    Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
+    Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
+            override;
+    Return<void> queueBuffer(
+            int32_t slot, const IOmxBufferProducer::QueueBufferInput& input,
+            queueBuffer_cb _hidl_cb) override;
+    Return<Status> cancelBuffer(int32_t slot, const hidl_handle& fence)
+            override;
+    Return<void> query(int32_t what, query_cb _hidl_cb) override;
+    Return<void> connect(const sp<IOmxProducerListener>& listener,
+            int32_t api, bool producerControlledByApp,
+            connect_cb _hidl_cb) override;
+    Return<Status> disconnect(
+            int32_t api,
+            IOmxBufferProducer::DisconnectMode mode) override;
+    Return<Status> setSidebandStream(const hidl_handle& stream) override;
+    Return<void> allocateBuffers(
+            uint32_t width, uint32_t height,
+            PixelFormat format, uint32_t usage) override;
+    Return<Status> allowAllocation(bool allow) override;
+    Return<Status> setGenerationNumber(uint32_t generationNumber) override;
+    Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
+    Return<Status> setSharedBufferMode(bool sharedBufferMode) override;
+    Return<Status> setAutoRefresh(bool autoRefresh) override;
+    Return<Status> setDequeueTimeout(int64_t timeoutNs) override;
+    Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
+    Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
+    Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
+};
+
+struct LWOmxBufferProducer : public IGraphicBufferProducer {
+    sp<IOmxBufferProducer> mBase;
+    LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
+
+    status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) override;
+    status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) override;
+    status_t setAsyncMode(bool async) override;
+    status_t dequeueBuffer(int* slot, sp<Fence>* fence, uint32_t w,
+            uint32_t h, ::android::PixelFormat format, uint32_t usage,
+            FrameEventHistoryDelta* outTimestamps) override;
+    status_t detachBuffer(int slot) override;
+    status_t detachNextBuffer(sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence)
+            override;
+    status_t attachBuffer(int* outSlot, const sp<GraphicBuffer>& buffer)
+            override;
+    status_t queueBuffer(int slot,
+            const QueueBufferInput& input,
+            QueueBufferOutput* output) override;
+    status_t cancelBuffer(int slot, const sp<Fence>& fence) override;
+    int query(int what, int* value) override;
+    status_t connect(const sp<IProducerListener>& listener, int api,
+            bool producerControlledByApp, QueueBufferOutput* output) override;
+    status_t disconnect(int api, DisconnectMode mode = DisconnectMode::Api)
+            override;
+    status_t setSidebandStream(const sp<NativeHandle>& stream) override;
+    void allocateBuffers(uint32_t width, uint32_t height,
+            ::android::PixelFormat format, uint32_t usage) override;
+    status_t allowAllocation(bool allow) override;
+    status_t setGenerationNumber(uint32_t generationNumber) override;
+    String8 getConsumerName() const override;
+    status_t setSharedBufferMode(bool sharedBufferMode) override;
+    status_t setAutoRefresh(bool autoRefresh) override;
+    status_t setDequeueTimeout(nsecs_t timeout) override;
+    status_t getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
+          sp<Fence>* outFence, float outTransformMatrix[16]) override;
+    void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
+    status_t getUniqueId(uint64_t* outId) const override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
similarity index 76%
copy from media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
copy to media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
index 79eb1be..1ebd9a7 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WOmxBufferSource.h"
 #include "Conversion.h"
 #include <utils/String8.h>
@@ -8,7 +24,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 // LWOmxBufferSource
 LWOmxBufferSource::LWOmxBufferSource(sp<IOmxBufferSource> const& base) :
@@ -82,12 +98,15 @@
 Return<void> TWOmxBufferSource::onInputBufferEmptied(
         uint32_t buffer, hidl_handle const& fence) {
     OMXFenceParcelable fenceParcelable;
-    wrapAs(&fenceParcelable, fence);
+    if (!convertTo(&fenceParcelable, fence)) {
+      return ::android::hardware::Status::fromExceptionCode(
+              ::android::hardware::Status::EX_BAD_PARCELABLE);
+    }
     return toHardwareStatus(mBase->onInputBufferEmptied(
             static_cast<int32_t>(buffer), fenceParcelable));
 }
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
similarity index 78%
copy from media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
copy to media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
index 3ba9453..3bf35c5 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxBufferSource.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxBufferSource.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
 
@@ -14,7 +30,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
 using ::android::hidl::base::V1_0::IBase;
@@ -64,7 +80,7 @@
 };
 
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
similarity index 90%
copy from media/libstagefright/omx/hal/1.0/WOmxNode.cpp
copy to media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
index 0e781d8..6764ba8 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include <IOMX.h>
 #include <OMXNodeInstance.h>
 #include "WOmxNode.h"
@@ -11,7 +27,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::Void;
 
@@ -50,7 +66,6 @@
 status_t LWOmxNode::setParameter(
         OMX_INDEXTYPE index, const void *params, size_t size) {
     hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
-    tParams = inHidlBytes(params, size);
     return toStatusT(mBase->setParameter(
             toRawIndexType(index), tParams));
 }
@@ -102,7 +117,8 @@
             [&fnStatus, sidebandHandle](
                     Status status, hidl_handle const& outSidebandHandle) {
                 fnStatus = toStatusT(status);
-                *sidebandHandle = native_handle_clone(outSidebandHandle);
+                *sidebandHandle = outSidebandHandle == nullptr ?
+                        nullptr : native_handle_clone(outSidebandHandle);
             }));
     return transStatus == NO_ERROR ? fnStatus : transStatus;
 }
@@ -180,6 +196,7 @@
     }
     status_t status = toStatusT(mBase->fillBuffer(
             buffer, codecBuffer, fenceNh));
+    native_handle_close(fenceNh);
     native_handle_delete(fenceNh);
     return status;
 }
@@ -201,6 +218,7 @@
             flags,
             toRawTicks(timestamp),
             fenceNh));
+    native_handle_close(fenceNh);
     native_handle_delete(fenceNh);
     return status;
 }
@@ -224,13 +242,14 @@
         return NO_MEMORY;
     }
     status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+    native_handle_close(nh);
     native_handle_delete(nh);
     return status;
 }
 
 // TODO: this is temporary, will be removed when quirks move to OMX side.
-status_t LWOmxNode::setQuirks(OMX_U32 /* quirks */) {
-    return NO_ERROR;
+status_t LWOmxNode::setQuirks(OMX_U32 quirks) {
+    return toStatusT(mBase->setQuirks(static_cast<uint32_t>(quirks)));
 }
 
 ::android::IBinder* LWOmxNode::onAsBinder() {
@@ -306,7 +325,7 @@
 Return<void> TWOmxNode::configureVideoTunnelMode(
         uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
         configureVideoTunnelMode_cb _hidl_cb) {
-    native_handle_t* sidebandHandle;
+    native_handle_t* sidebandHandle = nullptr;
     Status status = toStatus(mBase->configureVideoTunnelMode(
             portIndex,
             toEnumBool(tunneled),
@@ -376,7 +395,7 @@
     return toStatus(mBase->fillBuffer(
             buffer,
             omxBuffer,
-            native_handle_read_fd(fence)));
+            dup(native_handle_read_fd(fence))));
 }
 
 Return<Status> TWOmxNode::emptyBuffer(
@@ -391,7 +410,7 @@
             omxBuffer,
             flags,
             toOMXTicks(timestampUs),
-            native_handle_read_fd(fence)));
+            dup(native_handle_read_fd(fence))));
 }
 
 Return<void> TWOmxNode::getExtensionIndex(
@@ -406,14 +425,18 @@
 
 Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
     omx_message lMsg;
-    if (!wrapAs(&lMsg, tMsg)) {
+    if (!convertTo(&lMsg, tMsg)) {
         return Status::BAD_VALUE;
     }
     return toStatus(mBase->dispatchMessage(lMsg));
 }
 
+Return<void> TWOmxNode::setQuirks(uint32_t quirks) {
+    mBase->setQuirks(static_cast<OMX_U32>(quirks));
+    return Void();
+}
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
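
The WOmxNode.cpp hunks above pair every native_handle_delete() on a locally created fence handle with a preceding native_handle_close(): delete only frees the handle struct, while close is what actually closes the duplicated file descriptors. A small sketch of the lifecycle this enforces; fenceFd is an illustrative variable, not taken from the patch.

    native_handle_t* fenceNh = native_handle_create(1 /*numFds*/, 0 /*numInts*/);
    if (fenceNh != nullptr) {
        fenceNh->data[0] = dup(fenceFd);  // the wrapper owns this duplicate
        // ... pass fenceNh across the HIDL boundary ...
        native_handle_close(fenceNh);     // closes the dup'ed fd
        native_handle_delete(fenceNh);    // frees the handle struct itself
    }
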
diff --git a/media/libstagefright/omx/hal/1.0/WOmxNode.h b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
similarity index 89%
rename from media/libstagefright/omx/hal/1.0/WOmxNode.h
rename to media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
index 3a459d6..cb0b1a7 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxNode.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxNode.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
 
@@ -13,7 +29,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::media::omx::V1_0::CodecBuffer;
 using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
@@ -137,9 +153,10 @@
             hidl_string const& parameterName,
             getExtensionIndex_cb _hidl_cb) override;
     Return<Status> dispatchMessage(Message const& msg) override;
+    Return<void> setQuirks(uint32_t quirks) override;
 };
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
similarity index 64%
copy from media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
copy to media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
index f3ad8d3..fea5a9a 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxObserver.cpp
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "WOmxObserver.h"
 
 #include <vector>
@@ -12,7 +28,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 // LWOmxObserver
 LWOmxObserver::LWOmxObserver(sp<IOmxObserver> const& base) : mBase(base) {
@@ -29,6 +45,7 @@
     }
     mBase->onMessages(tMessages);
     for (auto& handle : handles) {
+        native_handle_close(handle);
         native_handle_delete(handle);
     }
 }
@@ -45,13 +62,13 @@
     std::list<omx_message> lMessages;
     for (size_t i = 0; i < tMessages.size(); ++i) {
         lMessages.push_back(omx_message{});
-        wrapAs(&lMessages.back(), tMessages[i]);
+        convertTo(&lMessages.back(), tMessages[i]);
     }
     mBase->onMessages(lMessages);
     return Return<void>();
 }
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/WOmxObserver.h b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
similarity index 72%
copy from media/libstagefright/omx/hal/1.0/WOmxObserver.h
copy to media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
index c8ab296..b1e2eb1 100644
--- a/media/libstagefright/omx/hal/1.0/WOmxObserver.h
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxObserver.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
 #define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
 
@@ -13,7 +29,7 @@
 namespace media {
 namespace omx {
 namespace V1_0 {
-namespace implementation {
+namespace utils {
 
 using ::android::hardware::media::omx::V1_0::IOmxObserver;
 using ::android::hardware::media::omx::V1_0::Message;
@@ -52,7 +68,7 @@
     Return<void> onMessages(const hidl_vec<Message>& tMessages) override;
 };
 
-}  // namespace implementation
+}  // namespace utils
 }  // namespace V1_0
 }  // namespace omx
 }  // namespace media
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp
new file mode 100644
index 0000000..d43215d
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WOmxProducerListener.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// TWOmxProducerListener
+TWOmxProducerListener::TWOmxProducerListener(
+        sp<IProducerListener> const& base):
+    mBase(base) {
+}
+
+Return<void> TWOmxProducerListener::onBufferReleased() {
+    mBase->onBufferReleased();
+    return Void();
+}
+
+Return<bool> TWOmxProducerListener::needsReleaseNotify() {
+    return mBase->needsReleaseNotify();
+}
+
+// LWOmxProducerListener
+LWOmxProducerListener::LWOmxProducerListener(
+        sp<IOmxProducerListener> const& base):
+    mBase(base) {
+}
+
+void LWOmxProducerListener::onBufferReleased() {
+    mBase->onBufferReleased();
+}
+
+bool LWOmxProducerListener::needsReleaseNotify() {
+    return static_cast<bool>(mBase->needsReleaseNotify());
+}
+
+::android::IBinder* LWOmxProducerListener::onAsBinder() {
+    return nullptr;
+}
+
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h
new file mode 100644
index 0000000..5b5e830
--- /dev/null
+++ b/media/libstagefright/omx/hal/1.0/utils/WOmxProducerListener.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+
+#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/IBinder.h>
+#include <gui/IProducerListener.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IProducerListener;
+
+struct TWOmxProducerListener : public IOmxProducerListener {
+    sp<IProducerListener> mBase;
+    TWOmxProducerListener(sp<IProducerListener> const& base);
+    Return<void> onBufferReleased() override;
+    Return<bool> needsReleaseNotify() override;
+};
+
+class LWOmxProducerListener : public IProducerListener {
+public:
+    sp<IOmxProducerListener> mBase;
+    LWOmxProducerListener(sp<IOmxProducerListener> const& base);
+    void onBufferReleased() override;
+    bool needsReleaseNotify() override;
+protected:
+    ::android::IBinder* onAsBinder() override;
+};
+
+}  // namespace utils
+}  // namespace V1_0
+}  // namespace omx
+}  // namespace media
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index ec14eb7..5e4ba10 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -9,7 +9,8 @@
 
 LOCAL_C_INCLUDES := \
 	$(TOP)/frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/system/libhidl/base/include \
 
 LOCAL_CFLAGS += -Werror -Wall
 
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index d419133..ea58343 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -466,7 +466,7 @@
     mr->setVideoSource(videoSource);
     mr->setOutputFormat(outputFormat);
     mr->setVideoEncoder(videoEncoder);
-    mr->setOutputFile(fd, 0, 0);
+    mr->setOutputFile(fd);
     mr->setVideoSize(width, height);
     mr->setVideoFrameRate(fps);
     mr->prepare();
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index c5322a4..a1b4aec 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -31,7 +31,8 @@
         libutils                        \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_SANITIZE := signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
 
 LOCAL_MODULE:= libstagefright_wfd
 
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index d11a10d..6df2065 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -27,6 +27,7 @@
 #include <string.h>
 #include <sys/endian.h>
 #include <sys/ioctl.h>
+#include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
@@ -34,6 +35,7 @@
 
 #include "AsyncIO.h"
 #include "MtpFfsHandle.h"
+#include "mtp.h"
 
 #define cpu_to_le16(x)  htole16(x)
 #define cpu_to_le32(x)  htole32(x)
@@ -57,8 +59,8 @@
 // To get good performance, override these with
 // higher values per device using the properties
 // sys.usb.ffs.max_read and sys.usb.ffs.max_write
-constexpr int USB_FFS_MAX_WRITE = 32768;
-constexpr int USB_FFS_MAX_READ = 32768;
+constexpr int USB_FFS_MAX_WRITE = MTP_BUFFER_SIZE;
+constexpr int USB_FFS_MAX_READ = MTP_BUFFER_SIZE;
 
 constexpr unsigned int MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
 
@@ -107,6 +109,7 @@
     .bInterfaceClass = USB_CLASS_STILL_IMAGE,
     .bInterfaceSubClass = 1,
     .bInterfaceProtocol = 1,
+    .iInterface = 1,
 };
 
 const struct usb_interface_descriptor ptp_interface_desc = {
@@ -259,14 +262,23 @@
     .intr_comp = ss_intr_comp,
 };
 
+#define STR_INTERFACE "MTP"
 const struct {
     struct usb_functionfs_strings_head header;
+    struct {
+        __le16 code;
+        const char str1[sizeof(STR_INTERFACE)];
+    } __attribute__((packed)) lang0;
 } __attribute__((packed)) strings = {
     .header = {
         .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC),
         .length = cpu_to_le32(sizeof(strings)),
-        .str_count = cpu_to_le32(0),
-        .lang_count = cpu_to_le32(0),
+        .str_count = cpu_to_le32(1),
+        .lang_count = cpu_to_le32(1),
+    },
+    .lang0 = {
+        .code = cpu_to_le16(0x0409),
+        .str1 = STR_INTERFACE,
     },
 };
 
@@ -333,28 +345,9 @@
     if (mBulkIn > -1 || mBulkOut > -1 || mIntr > -1)
         LOG(WARNING) << "Endpoints were not closed before configure!";
 
-    mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
-    if (mBulkIn < 0) {
-        PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
-        goto err;
-    }
-
-    mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
-    if (mBulkOut < 0) {
-        PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
-        goto err;
-    }
-
-    mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
-    if (mIntr < 0) {
-        PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open intr ep";
-        goto err;
-    }
-
     return true;
 
 err:
-    closeEndpoints();
     closeConfig();
     return false;
 }
@@ -436,7 +429,49 @@
 
 int MtpFfsHandle::start() {
     mLock.lock();
-    return 0;
+
+    mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
+    if (mBulkIn < 0) {
+        PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
+        return -1;
+    }
+
+    mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
+    if (mBulkOut < 0) {
+        PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
+        return -1;
+    }
+
+    mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
+    if (mIntr < 0) {
+        PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open intr ep";
+        return -1;
+    }
+
+    mBuffer1.resize(MAX_FILE_CHUNK_SIZE);
+    mBuffer2.resize(MAX_FILE_CHUNK_SIZE);
+    posix_madvise(mBuffer1.data(), MAX_FILE_CHUNK_SIZE,
+            POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
+    posix_madvise(mBuffer2.data(), MAX_FILE_CHUNK_SIZE,
+            POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
+
+    // Get device specific r/w size
+    mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", USB_FFS_MAX_WRITE);
+    mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", USB_FFS_MAX_READ);
+
+    while (mMaxWrite > USB_FFS_MAX_WRITE && mMaxRead > USB_FFS_MAX_READ) {
+        // If larger contiguous chunks of memory aren't available, fall back
+        // to smaller allocations.
+        if (ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxWrite)) ||
+            ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxRead))) {
+            mMaxWrite /= 2;
+            mMaxRead /= 2;
+        } else {
+            return 0;
+        }
+    }
+    PLOG(ERROR) << "Functionfs could not allocate any memory!";
+    return -1;
 }
 
 int MtpFfsHandle::configure(bool usePtp) {
@@ -454,13 +489,6 @@
         return -1;
     }
 
-    // Get device specific r/w size
-    mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", 0);
-    mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", 0);
-    if (!mMaxWrite)
-        mMaxWrite = USB_FFS_MAX_WRITE;
-    if (!mMaxRead)
-        mMaxRead = USB_FFS_MAX_READ;
     return 0;
 }
 
@@ -469,24 +497,6 @@
     mLock.unlock();
 }
 
-class ScopedEndpointBufferAlloc {
-private:
-    const int mFd;
-    const unsigned int mAllocSize;
-public:
-    ScopedEndpointBufferAlloc(int fd, unsigned alloc_size) :
-        mFd(fd),
-        mAllocSize(alloc_size) {
-        if (ioctl(mFd, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mAllocSize)))
-            PLOG(DEBUG) << "FFS endpoint alloc failed!";
-    }
-
-    ~ScopedEndpointBufferAlloc() {
-        if (ioctl(mFd, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(0)))
-            PLOG(DEBUG) << "FFS endpoint alloc reset failed!";
-    }
-};
-
 /* Read from USB and write to a local file. */
 int MtpFfsHandle::receiveFile(mtp_file_range mfr) {
     // When receiving files, the incoming length is given in 32 bits.
@@ -494,15 +504,8 @@
     uint32_t file_length = mfr.length;
     uint64_t offset = lseek(mfr.fd, 0, SEEK_CUR);
 
-    int buf1_len = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
-    std::vector<char> buf1(buf1_len);
-    char* data = buf1.data();
-
-    // If necessary, allocate a second buffer for background r/w
-    int buf2_len = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE),
-            file_length - MAX_FILE_CHUNK_SIZE);
-    std::vector<char> buf2(std::max(0, buf2_len));
-    char *data2 = buf2.data();
+    char *data = mBuffer1.data();
+    char *data2 = mBuffer2.data();
 
     struct aiocb aio;
     aio.aio_fildes = mfr.fd;
@@ -514,7 +517,6 @@
     bool write = false;
 
     posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
-    ScopedEndpointBufferAlloc(mBulkOut, mMaxRead);
 
     // Break down the file into pieces that fit in buffers
     while (file_length > 0 || write) {
@@ -592,19 +594,12 @@
         packet_size = mBulkIn_desc.wMaxPacketSize;
     }
 
-    posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
-
     int init_read_len = packet_size - sizeof(mtp_data_header);
-    int buf1_len = std::max(static_cast<uint64_t>(packet_size), std::min(
-                  static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length - init_read_len));
-    std::vector<char> buf1(buf1_len);
-    char *data = buf1.data();
 
-    // If necessary, allocate a second buffer for background r/w
-    int buf2_len = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE),
-            file_length - MAX_FILE_CHUNK_SIZE - init_read_len);
-    std::vector<char> buf2(std::max(0, buf2_len));
-    char *data2 = buf2.data();
+    char *data = mBuffer1.data();
+    char *data2 = mBuffer2.data();
+
+    posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
 
     struct aiocb aio;
     aio.aio_fildes = mfr.fd;
@@ -630,8 +625,6 @@
     if (writeHandle(mBulkIn, data, packet_size) == -1) return -1;
     if (file_length == 0) return 0;
 
-    ScopedEndpointBufferAlloc(mBulkIn, mMaxWrite);
-
     // Break down the file into pieces that fit in buffers
     while(file_length > 0) {
         if (read) {
@@ -655,7 +648,7 @@
         }
 
         if (file_length > 0) {
-            length = std::min((uint64_t) MAX_FILE_CHUNK_SIZE, file_length);
+            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
             // Queue up another read
             aio.aio_buf = data;
             aio.aio_offset = offset;
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 9cd4dcf..44ff0f3 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -48,6 +48,9 @@
     int mMaxWrite;
     int mMaxRead;
 
+    std::vector<char> mBuffer1;
+    std::vector<char> mBuffer2;
+
 public:
     int read(void *data, int len);
     int write(const void *data, int len);
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 8d56c16..753d833 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -179,6 +179,7 @@
 
     if (sHandle->start()) {
         ALOGE("Failed to start usb driver!");
+        sHandle->close();
         return;
     }
 
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index cdce932..7a9240b 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -172,7 +172,8 @@
         return NULL;
     }
 
-    status_t err = drm->createPlugin(uuid);
+    String8 nullPackageName;
+    status_t err = drm->createPlugin(uuid, nullPackageName);
 
     if (err != OK) {
         return NULL;
diff --git a/radio/IRadio.cpp b/radio/IRadio.cpp
index 5bbe7cb..72f3b68 100644
--- a/radio/IRadio.cpp
+++ b/radio/IRadio.cpp
@@ -16,6 +16,7 @@
 */
 
 #define LOG_TAG "IRadio"
+//#define LOG_NDEBUG 0
 #include <utils/Log.h>
 #include <utils/Errors.h>
 #include <binder/IMemory.h>
@@ -23,7 +24,7 @@
 #include <radio/IRadioService.h>
 #include <radio/IRadioClient.h>
 #include <system/radio.h>
-#include <system/radio_metadata.h>
+#include <system/RadioMetadataWrapper.h>
 
 namespace android {
 
@@ -300,14 +301,9 @@
         case GET_PROGRAM_INFORMATION: {
             CHECK_INTERFACE(IRadio, data, reply);
             struct radio_program_info info;
-            status_t status;
+            RadioMetadataWrapper metadataWrapper(&info.metadata);
 
-            status = radio_metadata_allocate(&info.metadata, 0, 0);
-            if (status != NO_ERROR) {
-                return status;
-            }
-            status = getProgramInformation(&info);
-
+            status_t status = getProgramInformation(&info);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->write(&info, sizeof(struct radio_program_info));
@@ -319,7 +315,6 @@
                     reply->writeUint32(0);
                 }
             }
-            radio_metadata_deallocate(info.metadata);
             return NO_ERROR;
         }
         case HAS_CONTROL: {
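
The IRadio change above replaces manual radio_metadata_allocate()/radio_metadata_deallocate() calls with a stack-allocated RadioMetadataWrapper, so the metadata is released on every exit path of the binder handler. The wrapper's real definition lives in system/RadioMetadataWrapper.h and is not shown in this patch; the sketch below is only an illustration of the RAII shape such a wrapper presumably has.

    class ScopedRadioMetadata {  // illustrative stand-in, not the real class
    public:
        explicit ScopedRadioMetadata(radio_metadata_t** meta) : mMeta(meta) {
            radio_metadata_allocate(mMeta, 0 /*channel*/, 0 /*subChannel*/);
        }
        ~ScopedRadioMetadata() {
            radio_metadata_deallocate(*mMeta);
        }
        ScopedRadioMetadata(const ScopedRadioMetadata&) = delete;
        ScopedRadioMetadata& operator=(const ScopedRadioMetadata&) = delete;
    private:
        radio_metadata_t** mMeta;
    };
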
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 8aaa388..c737f4b 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1880,7 +1880,7 @@
               config->channel_mask,
               flags);
 
-    if (*devices == AUDIO_DEVICE_NONE) {
+    if (devices == NULL || *devices == AUDIO_DEVICE_NONE) {
         return BAD_VALUE;
     }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 35eceb2..e97d1ed 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -80,6 +80,7 @@
 class EffectsFactoryHalInterface;
 class FastMixer;
 class PassthruBufferProvider;
+class RecordBufferConverter;
 class ServerProxy;
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 343ad25..70929e4 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1630,11 +1630,16 @@
         // and 'update' / 'commit' do nothing for allocated buffers, thus
         // it's not needed to consider any other buffers here.
         mInBuffer->update();
-        mOutBuffer->update();
+        if (mInBuffer->audioBuffer()->raw != mOutBuffer->audioBuffer()->raw) {
+            mOutBuffer->update();
+        }
         for (size_t i = 0; i < size; i++) {
             mEffects[i]->process();
         }
-        mOutBuffer->commit();
+        mInBuffer->commit();
+        if (mInBuffer->audioBuffer()->raw != mOutBuffer->audioBuffer()->raw) {
+            mOutBuffer->commit();
+        }
     }
     bool doResetVolume = false;
     for (size_t i = 0; i < size; i++) {
@@ -1935,6 +1940,19 @@
     }
 }
 
+static void dumpInOutBuffer(
+        char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
+    if (buffer == nullptr) {
+        snprintf(dump, dumpSize, "%p", buffer);
+    } else if (buffer->externalData() != nullptr) {
+        snprintf(dump, dumpSize, "%p -> %p",
+                isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
+                isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+    } else {
+        snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
+    }
+}
+
 void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
@@ -1952,11 +1970,14 @@
             result.append("\tCould not lock mutex:\n");
         }
 
-        result.append("\tIn buffer   Out buffer   Active tracks:\n");
-        snprintf(buffer, SIZE, "\t%p  %p   %d\n",
-                mInBuffer->audioBuffer(),
-                mOutBuffer->audioBuffer(),
-                mActiveTrackCnt);
+        char inBufferStr[64], outBufferStr[64];
+        dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
+        dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
+        snprintf(buffer, SIZE, "\t%-*s%-*s   Active tracks:\n",
+                (int)strlen(inBufferStr), "In buffer    ",
+                (int)strlen(outBufferStr), "Out buffer      ");
+        result.append(buffer);
+        snprintf(buffer, SIZE, "\t%s   %s   %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
         result.append(buffer);
         write(fd, result.string(), result.size());
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b40d51c0d..a1d81f9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -29,6 +29,7 @@
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/RecordBufferConverter.h>
 #include <media/TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -1266,6 +1267,7 @@
     bool chainCreated = false;
     bool effectCreated = false;
     bool effectRegistered = false;
+    audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -1299,15 +1301,16 @@
         ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
 
         if (effect == 0) {
-            audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+            effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
             // Check CPU and memory usage
-            lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+            lStatus = AudioSystem::registerEffect(
+                    desc, mId, chain->strategy(), sessionId, effectId);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
             effectRegistered = true;
             // create a new effect module if none present in the chain
-            lStatus = chain->createEffect_l(effect, this, desc, id, sessionId, pinned);
+            lStatus = chain->createEffect_l(effect, this, desc, effectId, sessionId, pinned);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
@@ -1336,7 +1339,7 @@
             chain->removeEffect_l(effect);
         }
         if (effectRegistered) {
-            AudioSystem::unregisterEffect(effect->id());
+            AudioSystem::unregisterEffect(effectId);
         }
         if (chainCreated) {
             removeEffectChain_l(chain);
@@ -6953,252 +6956,6 @@
     buffer->frameCount = 0;
 }
 
-AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate) :
-            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
-            // mSrcFormat
-            // mSrcSampleRate
-            // mDstChannelMask
-            // mDstFormat
-            // mDstSampleRate
-            // mSrcChannelCount
-            // mDstChannelCount
-            // mDstFrameSize
-            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
-            mResampler(NULL),
-            mIsLegacyDownmix(false),
-            mIsLegacyUpmix(false),
-            mRequiresFloat(false),
-            mInputConverterProvider(NULL)
-{
-    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
-            dstChannelMask, dstFormat, dstSampleRate);
-}
-
-AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() {
-    free(mBuf);
-    delete mResampler;
-    delete mInputConverterProvider;
-}
-
-size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
-        AudioBufferProvider *provider, size_t frames)
-{
-    if (mInputConverterProvider != NULL) {
-        mInputConverterProvider->setBufferProvider(provider);
-        provider = mInputConverterProvider;
-    }
-
-    if (mResampler == NULL) {
-        ALOGVV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                mSrcSampleRate, mSrcFormat, mDstFormat);
-
-        AudioBufferProvider::Buffer buffer;
-        for (size_t i = frames; i > 0; ) {
-            buffer.frameCount = i;
-            status_t status = provider->getNextBuffer(&buffer);
-            if (status != OK || buffer.frameCount == 0) {
-                frames -= i; // cannot fill request.
-                break;
-            }
-            // format convert to destination buffer
-            convertNoResampler(dst, buffer.raw, buffer.frameCount);
-
-            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
-            i -= buffer.frameCount;
-            provider->releaseBuffer(&buffer);
-        }
-    } else {
-         ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
-
-         // reallocate buffer if needed
-         if (mBufFrameSize != 0 && mBufFrames < frames) {
-             free(mBuf);
-             mBufFrames = frames;
-             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-         }
-        // resampler accumulates, but we only have one source track
-        memset(mBuf, 0, frames * mBufFrameSize);
-        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
-        // format convert to destination buffer
-        convertResampler(dst, mBuf, frames);
-    }
-    return frames;
-}
-
-status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate)
-{
-    // quick evaluation if there is any change.
-    if (mSrcFormat == srcFormat
-            && mSrcChannelMask == srcChannelMask
-            && mSrcSampleRate == srcSampleRate
-            && mDstFormat == dstFormat
-            && mDstChannelMask == dstChannelMask
-            && mDstSampleRate == dstSampleRate) {
-        return NO_ERROR;
-    }
-
-    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
-            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
-            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
-    const bool valid =
-            audio_is_input_channel(srcChannelMask)
-            && audio_is_input_channel(dstChannelMask)
-            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
-            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
-            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
-            ; // no upsampling checks for now
-    if (!valid) {
-        return BAD_VALUE;
-    }
-
-    mSrcFormat = srcFormat;
-    mSrcChannelMask = srcChannelMask;
-    mSrcSampleRate = srcSampleRate;
-    mDstFormat = dstFormat;
-    mDstChannelMask = dstChannelMask;
-    mDstSampleRate = dstSampleRate;
-
-    // compute derived parameters
-    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
-    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
-    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
-
-    // do we need to resample?
-    delete mResampler;
-    mResampler = NULL;
-    if (mSrcSampleRate != mDstSampleRate) {
-        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
-                mSrcChannelCount, mDstSampleRate);
-        mResampler->setSampleRate(mSrcSampleRate);
-        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
-    }
-
-    // are we running legacy channel conversion modes?
-    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
-                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
-    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
-                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
-
-    // do we need to process in float?
-    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
-
-    // do we need a staging buffer to convert for destination (we can still optimize this)?
-    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
-    if (mResampler != NULL) {
-        mBufFrameSize = max(mSrcChannelCount, FCC_2)
-                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
-    } else {
-        mBufFrameSize = 0;
-    }
-    mBufFrames = 0; // force the buffer to be resized.
-
-    // do we need an input converter buffer provider to give us float?
-    delete mInputConverterProvider;
-    mInputConverterProvider = NULL;
-    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
-        mInputConverterProvider = new ReformatBufferProvider(
-                audio_channel_count_from_in_mask(mSrcChannelMask),
-                mSrcFormat,
-                AUDIO_FORMAT_PCM_FLOAT,
-                256 /* provider buffer frame count */);
-    }
-
-    // do we need a remixer to do channel mask conversion
-    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
-        (void) memcpy_by_index_array_initialization_from_channel_mask(
-                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
-    }
-    return NO_ERROR;
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
-        void *dst, const void *src, size_t frames)
-{
-    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
-    if (mBufFrameSize != 0 && mBufFrames < frames) {
-        free(mBuf);
-        mBufFrames = frames;
-        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-    }
-    // do we need to do legacy upmix and downmix?
-    if (mIsLegacyUpmix || mIsLegacyDownmix) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        if (mIsLegacyUpmix) {
-            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
-                    (const float *)src, frames);
-        } else /*mIsLegacyDownmix */ {
-            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
-                    (const float *)src, frames);
-        }
-        if (mBuf != NULL) {
-            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mDstChannelCount);
-        }
-        return;
-    }
-    // do we need to do channel mask conversion?
-    if (mSrcChannelMask != mDstChannelMask) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        memcpy_by_index_array(dstBuf, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
-        if (dstBuf == dst) {
-            return; // format is the same
-        }
-    }
-    // convert to destination buffer
-    const void *convertBuf = mBuf != NULL ? mBuf : src;
-    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
-            frames * mDstChannelCount);
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
-        void *dst, /*not-a-const*/ void *src, size_t frames)
-{
-    // src buffer format is ALWAYS float when entering this routine
-    if (mIsLegacyUpmix) {
-        ; // mono to stereo already handled by resampler
-    } else if (mIsLegacyDownmix
-            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
-        // the resampler outputs stereo for mono input channel (a feature?)
-        // must convert to mono
-        downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-    } else if (mSrcChannelMask != mDstChannelMask) {
-        // convert to mono channel again for channel mask conversion (could be skipped
-        // with further optimization).
-        if (mSrcChannelCount == 1) {
-            downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-        }
-        // convert to destination format (in place, OK as float is larger than other types)
-        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
-            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mSrcChannelCount);
-        }
-        // channel convert and save to dst
-        memcpy_by_index_array(dst, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
-        return;
-    }
-    // convert to destination format and save to dst
-    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-            frames * mDstChannelCount);
-}
 
 bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
                                                         status_t& status)
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index e43f001..3fb0b07 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1323,92 +1323,6 @@
                                             // rolling counter that is never cleared
     };
 
-    /* The RecordBufferConverter is used for format, channel, and sample rate
-     * conversion for a RecordTrack.
-     *
-     * TODO: Self contained, so move to a separate file later.
-     *
-     * RecordBufferConverter uses the convert() method rather than exposing a
-     * buffer provider interface; this is to save a memory copy.
-     */
-    class RecordBufferConverter
-    {
-    public:
-        RecordBufferConverter(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        ~RecordBufferConverter();
-
-        /* Converts input data from an AudioBufferProvider by format, channelMask,
-         * and sampleRate to a destination buffer.
-         *
-         * Parameters
-         *      dst:  buffer to place the converted data.
-         * provider:  buffer provider to obtain source data.
-         *   frames:  number of frames to convert
-         *
-         * Returns the number of frames converted.
-         */
-        size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
-
-        // returns NO_ERROR if constructor was successful
-        status_t initCheck() const {
-            // mSrcChannelMask set on successful updateParameters
-            return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
-        }
-
-        // allows dynamic reconfigure of all parameters
-        status_t updateParameters(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        // called to reset resampler buffers on record track discontinuity
-        void reset() {
-            if (mResampler != NULL) {
-                mResampler->reset();
-            }
-        }
-
-    private:
-        // format conversion when not using resampler
-        void convertNoResampler(void *dst, const void *src, size_t frames);
-
-        // format conversion when using resampler; modifies src in-place
-        void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
-
-        // user provided information
-        audio_channel_mask_t mSrcChannelMask;
-        audio_format_t       mSrcFormat;
-        uint32_t             mSrcSampleRate;
-        audio_channel_mask_t mDstChannelMask;
-        audio_format_t       mDstFormat;
-        uint32_t             mDstSampleRate;
-
-        // derived information
-        uint32_t             mSrcChannelCount;
-        uint32_t             mDstChannelCount;
-        size_t               mDstFrameSize;
-
-        // format conversion buffer
-        void                *mBuf;
-        size_t               mBufFrames;
-        size_t               mBufFrameSize;
-
-        // resampler info
-        AudioResampler      *mResampler;
-
-        bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
-        bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
-        bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
-        PassthruBufferProvider *mInputConverterProvider;    // converts input to float
-        int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
-    };
-
 #include "RecordTracks.h"
 
             RecordThread(const sp<AudioFlinger>& audioFlinger,
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 48e09c7..f2dd884 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -32,6 +32,7 @@
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
+#include <media/RecordBufferConverter.h>
 #include <audio_utils/minifloat.h>
 
 // ----------------------------------------------------------------------------
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 5445413..b169bac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -57,7 +57,7 @@
     void clearPreemptedSessions();
     bool isActive() const;
     bool isSourceActive(audio_source_t source) const;
-    audio_source_t inputSource() const;
+    audio_source_t inputSource(bool activeOnly = false) const;
     bool isSoundTrigger() const;
     status_t addAudioSession(audio_session_t session,
                              const sp<AudioSession>& audioSession);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 97a9c94..0bacef7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -43,6 +43,8 @@
 
     void setMix(AudioMix &mix);
 
+    status_t dump(int fd, int spaces, int index) const;
+
 private:
     AudioMix    mMix;                   // Audio policy mix descriptor
     sp<SwAudioOutputDescriptor> mOutput;  // Corresponding output stream
@@ -77,6 +79,8 @@
                                                   AudioMix **policyMix);
 
     status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+
+    status_t dump(int fd) const;
 };
 
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 84e3a36..9e705aa 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -28,7 +28,25 @@
     typedef device_category Type;
     typedef Vector<Type> Collection;
 };
+struct MixTypeTraits
+{
+    typedef int32_t Type;
+    typedef Vector<Type> Collection;
+};
+struct RouteFlagTraits
+{
+    typedef uint32_t Type;
+    typedef Vector<Type> Collection;
+};
+struct RuleTraits
+{
+    typedef uint32_t Type;
+    typedef Vector<Type> Collection;
+};
 
 typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+typedef TypeConverter<MixTypeTraits> MixTypeConverter;
+typedef TypeConverter<RouteFlagTraits> RouteFlagTypeConverter;
+typedef TypeConverter<RuleTraits> RuleTypeConverter;
 
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 44f9637..2492ed6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -63,11 +63,9 @@
     return mId;
 }
 
-audio_source_t AudioInputDescriptor::inputSource() const
+audio_source_t AudioInputDescriptor::inputSource(bool activeOnly) const
 {
-    // TODO: return highest priority input source
-    return mSessions.size() > 0 ? mSessions.valueAt(0)->inputSource() :
-                       AUDIO_SOURCE_DEFAULT;
+    return getHighestPrioritySource(activeOnly);
 }
 
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 1d6787a..c2981a1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -21,7 +21,7 @@
 #include "AudioGain.h"
 #include "TypeConverter.h"
 
-#include <android/log.h>
+#include <log/log.h>
 #include <utils/String8.h>
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 02833a9..08930f1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include "AudioPolicyMix.h"
+#include "TypeConverter.h"
 #include "HwModule.h"
 #include "AudioPort.h"
 #include "IOProfile.h"
@@ -51,6 +52,66 @@
     return &mMix;
 }
 
+status_t AudioPolicyMix::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sAudio Policy Mix %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    std::string mixTypeLiteral;
+    if (!MixTypeConverter::toString(mMix.mMixType, mixTypeLiteral)) {
+        ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMix.mMixType);
+        return BAD_VALUE;
+    }
+    snprintf(buffer, SIZE, "%*s- mix type: %s\n", spaces, "", mixTypeLiteral.c_str());
+    result.append(buffer);
+    std::string routeFlagLiteral;
+    RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
+    snprintf(buffer, SIZE, "%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());
+    result.append(buffer);
+    std::string deviceLiteral;
+    deviceToString(mMix.mDeviceType, deviceLiteral);
+    snprintf(buffer, SIZE, "%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());
+    result.append(buffer);
+
+    int indexCriterion = 0;
+    for (const auto &criterion : mMix.mCriteria) {
+        snprintf(buffer, SIZE, "%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
+        result.append(buffer);
+        std::string usageLiteral;
+        if (!UsageTypeConverter::toString(criterion.mValue.mUsage, usageLiteral)) {
+            ALOGE("%s: failed to convert usage %d", __FUNCTION__, criterion.mValue.mUsage);
+            return BAD_VALUE;
+        }
+        snprintf(buffer, SIZE, "%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
+        result.append(buffer);
+        if (mMix.mMixType == MIX_TYPE_RECORDERS) {
+            std::string sourceLiteral;
+            if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
+                ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
+                return BAD_VALUE;
+            }
+            snprintf(buffer, SIZE, "%*s- Source:%s\n", spaces + 4, "", sourceLiteral.c_str());
+            result.append(buffer);
+        }
+        snprintf(buffer, SIZE, "%*s- Uid:%d\n", spaces + 4, "", criterion.mValue.mUid);
+        result.append(buffer);
+        std::string ruleLiteral;
+        if (!RuleTypeConverter::toString(criterion.mRule, ruleLiteral)) {
+            ALOGE("%s: failed to convert source %d", __FUNCTION__,criterion.mRule);
+            return BAD_VALUE;
+        }
+        snprintf(buffer, SIZE, "%*s- Rule:%s\n", spaces + 4, "", ruleLiteral.c_str());
+        result.append(buffer);
+    }
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
 status_t AudioPolicyMixCollection::registerMix(const String8& address, AudioMix mix,
                                                sp<SwAudioOutputDescriptor> desc)
 {
@@ -288,4 +349,14 @@
     return NO_ERROR;
 }
 
+status_t AudioPolicyMixCollection::dump(int fd) const
+{
+    std::string log("\nAudio Policy Mix:\n");
+    write(fd, log.c_str(), log.size());
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
 }; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index f19b43c..dbdcca7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -23,7 +23,7 @@
 #include "AudioGain.h"
 #include "TypeConverter.h"
 
-#include <android/log.h>
+#include <log/log.h>
 #include <utils/String8.h>
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 4839683..0362037 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <media/AudioPolicy.h>
+
 #include "TypeConverter.h"
 
 namespace android {
@@ -30,6 +32,37 @@
     TERMINATOR
 };
 
+template <>
+const MixTypeConverter::Table MixTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_INVALID),
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_PLAYERS),
+    MAKE_STRING_FROM_ENUM(MIX_TYPE_RECORDERS),
+    TERMINATOR
+};
+
+template <>
+const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_RENDER),
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK),
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_ALL),
+    TERMINATOR
+};
+
+template <>
+const RuleTypeConverter::Table RuleTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUSION_MASK),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_USAGE),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
+    MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_USAGE),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET),
+    MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_UID),
+    TERMINATOR
+};
+
 template class TypeConverter<DeviceCategoryTraits>;
+template class TypeConverter<MixTypeTraits>;
+template class TypeConverter<RouteFlagTraits>;
+template class TypeConverter<RuleTraits>;
 
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 923d4f6..54c406c 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1651,13 +1651,15 @@
             } else if (isSoundTrigger) {
                 break;
             }
+
             // force close input if current source is now the highest priority request on this input
             // and current input properties are not exactly as requested.
-            if ((desc->mSamplingRate != samplingRate ||
+            if (!isConcurrentSource(inputSource) && !isConcurrentSource(desc->inputSource()) &&
+                    ((desc->mSamplingRate != samplingRate ||
                     desc->mChannelMask != channelMask ||
                     !audio_formats_match(desc->mFormat, format)) &&
                     (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
-                     source_priority(inputSource))) {
+                     source_priority(inputSource)))) {
                 ALOGV("%s: ", __FUNCTION__);
                 AudioSessionCollection sessions = desc->getAudioSessions(false /*activeOnly*/);
                 for (size_t j = 0; j < sessions.size(); j++) {
@@ -1715,6 +1717,14 @@
     return input;
 }
 
+//static
+bool AudioPolicyManager::isConcurrentSource(audio_source_t source)
+{
+    return (source == AUDIO_SOURCE_HOTWORD) ||
+            (source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+            (source == AUDIO_SOURCE_FM_TUNER);
+}
+
 bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
         const sp<AudioSession>& audioSession)
 {
@@ -1734,16 +1744,14 @@
     // 3) All other active captures are either for re-routing or HOTWORD
 
     if (is_virtual_input_device(inputDesc->mDevice) ||
-            audioSession->inputSource() == AUDIO_SOURCE_HOTWORD ||
-            audioSession->inputSource() == AUDIO_SOURCE_FM_TUNER) {
+            isConcurrentSource(audioSession->inputSource())) {
         return true;
     }
 
     Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
     for (size_t i = 0; i <  activeInputs.size(); i++) {
         sp<AudioInputDescriptor> activeInput = activeInputs[i];
-        if ((activeInput->inputSource() != AUDIO_SOURCE_HOTWORD) &&
-                (activeInput->inputSource() != AUDIO_SOURCE_FM_TUNER) &&
+        if (!isConcurrentSource(activeInput->inputSource(true)) &&
                 !is_virtual_input_device(activeInput->mDevice)) {
             return false;
         }
@@ -1924,7 +1932,7 @@
     ALOG_ASSERT(inputDesc != 0);
 
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
-    if (index < 0) {
+    if (audioSession == 0) {
         ALOGW("releaseInput() unknown session %d on input %d", session, input);
         return;
     }
@@ -2419,6 +2427,7 @@
     mVolumeCurves->dump(fd);
     mEffects.dump(fd);
     mAudioPatches.dump(fd);
+    mPolicyMixes.dump(fd);
 
     return NO_ERROR;
 }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3c5ed3a..cea3f54 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -507,6 +507,8 @@
 
         void clearAudioSources(uid_t uid);
 
+        static bool isConcurrentSource(audio_source_t source);
+
         bool isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
                 const sp<AudioSession>& audioSession);
 
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d1edb56..9328d65 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -48,8 +48,10 @@
     device3/Camera3OutputStream.cpp \
     device3/Camera3ZslStream.cpp \
     device3/Camera3DummyStream.cpp \
+    device3/Camera3SharedOutputStream.cpp \
     device3/StatusTracker.cpp \
     device3/Camera3BufferManager.cpp \
+    device3/Camera3StreamSplitter.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
@@ -88,12 +90,6 @@
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
-ifeq ($(ENABLE_TREBLE), true)
-
-  LOCAL_CFLAGS += -DENABLE_TREBLE
-
-endif # ENABLE_TREBLE
-
 LOCAL_MODULE:= libcameraservice
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c92092c..f429df8 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -18,12 +18,6 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
-#ifdef ENABLE_TREBLE
-  #define USE_HIDL true
-#else
-  #define USE_HIDL false
-#endif
-
 #include <algorithm>
 #include <climits>
 #include <stdio.h>
@@ -197,10 +191,13 @@
     notifier.noteResetFlashlight();
 
     status_t res = INVALID_OPERATION;
-    if (USE_HIDL) {
-        res = enumerateProviders();
-    } else {
+
+    bool disableTreble = property_get_bool("camera.disable_treble", false);
+    if (disableTreble) {
+        ALOGI("Treble disabled - using legacy path");
         res = loadLegacyHalModule();
+    } else {
+        res = enumerateProviders();
     }
     if (res == OK) {
         mInitialized = true;
@@ -324,7 +321,10 @@
     mNumberOfCameras = mCameraProviderManager->getCameraCount();
     mNumberOfNormalCameras = mCameraProviderManager->getStandardCameraCount();
 
-    // TODO: Set up vendor tags
+    // Set up vendor tags before we call get_camera_info the first time
+    // because HAL might need to set up static vendor keys in get_camera_info
+    // TODO: maybe put this into CameraProviderManager::initialize()?
+    mCameraProviderManager->setUpVendorTags();
 
     mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
     res = mFlashlight->findFlashUnits();
@@ -684,7 +684,6 @@
 
 int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
     ATRACE_CALL();
-    struct camera_info info;
 
     int deviceVersion = 0;
 
@@ -692,6 +691,7 @@
         int id = cameraIdToInt(cameraId);
         if (id < 0) return -1;
 
+        struct camera_info info;
         if (mModule->getCameraInfo(id, &info) != OK) {
             return -1;
         }
@@ -710,8 +710,15 @@
         hardware::hidl_version maxVersion{0,0};
         res = mCameraProviderManager->getHighestSupportedVersion(String8::std_string(cameraId),
                 &maxVersion);
-        if (res == NAME_NOT_FOUND) return -1;
+        if (res != OK) return -1;
         deviceVersion = HARDWARE_DEVICE_API_VERSION(maxVersion.get_major(), maxVersion.get_minor());
+
+        hardware::CameraInfo info;
+        if (facing) {
+            res = mCameraProviderManager->getCameraInfo(String8::std_string(cameraId), &info);
+            if (res != OK) return -1;
+            *facing = info.facing;
+        }
     }
     return deviceVersion;
 }
@@ -1532,17 +1539,14 @@
         // give flashlight a chance to close devices if necessary.
         mFlashlight->prepareDeviceOpen(cameraId);
 
-        // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
-        int id = cameraIdToInt(cameraId);
-        if (id == -1) {
-            ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
-                    cameraId.string());
-            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
-                    "Bad camera ID \"%s\" passed to camera open", cameraId.string());
-        }
-
         int facing = -1;
         int deviceVersion = getDeviceVersion(cameraId, /*out*/&facing);
+        if (facing == -1) {
+            ALOGE("%s: Unable to get camera device \"%s\"  facing", __FUNCTION__, cameraId.string());
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Unable to get camera device \"%s\" facing", cameraId.string());
+        }
+
         sp<BasicClient> tmp = nullptr;
         if(!(ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
                 clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
@@ -1880,9 +1884,13 @@
     }
 
     int facing = -1;
-
-    int deviceVersion = getDeviceVersion(id, &facing);
-
+    int deviceVersion = getDeviceVersion(id, /*out*/ &facing);
+    if (facing == -1) {
+        String8 msg = String8::format("Unable to get facing for camera device %s",
+                id.string());
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
+    }
     switch(deviceVersion) {
         case CAMERA_DEVICE_API_VERSION_1_0:
         case CAMERA_DEVICE_API_VERSION_3_0:
@@ -2188,8 +2196,11 @@
         const std::set<userid_t>& newUserIds) {
     String8 newUsers = toString(newUserIds);
     String8 oldUsers = toString(oldUserIds);
+    if (oldUsers.size() == 0) {
+        oldUsers = "<None>";
+    }
     // Log the new and old users
-    logEvent(String8::format("USER_SWITCH previous allowed users: %s , current allowed users: %s",
+    logEvent(String8::format("USER_SWITCH previous allowed user IDs: %s, current allowed user IDs: %s",
             oldUsers.string(), newUsers.string()));
 }
 
@@ -2735,181 +2746,162 @@
 status_t CameraService::dump(int fd, const Vector<String16>& args) {
     ATRACE_CALL();
 
-    String8 result("Dump of the Camera Service:\n");
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
-        result = result.format("Permission Denial: "
-                "can't dump CameraService from pid=%d, uid=%d\n",
+        dprintf(fd, "Permission Denial: can't dump CameraService from pid=%d, uid=%d\n",
                 getCallingPid(),
                 getCallingUid());
-        write(fd, result.string(), result.size());
-    } else {
-        bool locked = tryLock(mServiceLock);
-        // failed to lock - CameraService is probably deadlocked
-        if (!locked) {
-            result.append("CameraService may be deadlocked\n");
-            write(fd, result.string(), result.size());
-        }
+        return NO_ERROR;
+    }
+    bool locked = tryLock(mServiceLock);
+    // failed to lock - CameraService is probably deadlocked
+    if (!locked) {
+        dprintf(fd, "!! CameraService may be deadlocked !!\n");
+    }
 
-        bool hasClient = false;
-        if (!mInitialized) {
-            result = String8::format("No camera HAL available!\n");
-            write(fd, result.string(), result.size());
+    if (!mInitialized) {
+        dprintf(fd, "!! No camera HAL available !!\n");
 
-            // Dump event log for error information
-            dumpEventLog(fd);
-
-            if (locked) mServiceLock.unlock();
-            return NO_ERROR;
-        }
-        if (mModule == nullptr) {
-            mCameraProviderManager->dump(fd, args);
-            // TODO - need way more dumping here
-
-            if (locked) mServiceLock.unlock();
-            return NO_ERROR;
-        }
-
-        result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
-        result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
-        result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
-        result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
-        result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
-        result.appendFormat("Number of normal camera devices: %d\n", mNumberOfNormalCameras);
-        String8 activeClientString = mActiveClientManager.toString();
-        result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
-        result.appendFormat("Allowed users:\n%s\n", toString(mAllowedUsers).string());
-
-        sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
-        if (desc == NULL) {
-            result.appendFormat("Vendor tags left unimplemented.\n");
-        } else {
-            result.appendFormat("Vendor tag definitions:\n");
-        }
-
-        write(fd, result.string(), result.size());
-
-        if (desc != NULL) {
-            desc->dump(fd, /*verbosity*/2, /*indentation*/4);
-        }
-
+        // Dump event log for error information
         dumpEventLog(fd);
 
-        bool stateLocked = tryLock(mCameraStatesLock);
-        if (!stateLocked) {
-            result = String8::format("CameraStates in use, may be deadlocked\n");
-            write(fd, result.string(), result.size());
+        if (locked) mServiceLock.unlock();
+        return NO_ERROR;
+    }
+    dprintf(fd, "\n== Service global info: ==\n\n");
+    dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
+    dprintf(fd, "Number of normal camera devices: %d\n", mNumberOfNormalCameras);
+    String8 activeClientString = mActiveClientManager.toString();
+    dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
+    dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+
+    dumpEventLog(fd);
+
+    bool stateLocked = tryLock(mCameraStatesLock);
+    if (!stateLocked) {
+        dprintf(fd, "CameraStates in use, may be deadlocked\n");
+    }
+
+    for (auto& state : mCameraStates) {
+        String8 cameraId = state.first;
+
+        dprintf(fd, "== Camera device %s dynamic info: ==\n", cameraId.string());
+
+        CameraParameters p = state.second->getShimParams();
+        if (!p.isEmpty()) {
+            dprintf(fd, "  Camera1 API shim is using parameters:\n        ");
+            p.dump(fd, args);
         }
 
-        for (auto& state : mCameraStates) {
-            String8 cameraId = state.first;
-            result = String8::format("Camera %s information:\n", cameraId.string());
-            camera_info info;
+        auto clientDescriptor = mActiveClientManager.get(cameraId);
+        if (clientDescriptor == nullptr) {
+            dprintf(fd, "  Device %s is closed, no client instance\n",
+                    cameraId.string());
+            continue;
+        }
+        dprintf(fd, "  Device %s is open. Client instance dump:\n",
+                cameraId.string());
+        dprintf(fd, "    Client priority level: %d\n", clientDescriptor->getPriority());
+        dprintf(fd, "    Client PID: %d\n", clientDescriptor->getOwnerId());
 
+        auto client = clientDescriptor->getValue();
+        dprintf(fd, "    Client package: %s\n",
+                String8(client->getPackageName()).string());
+
+        client->dumpClient(fd, args);
+
+        if (mModule != nullptr) {
+            dprintf(fd, "== Camera HAL device %s static information: ==\n", cameraId.string());
+
+            camera_info info;
             status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
+            int deviceVersion = -1;
             if (rc != OK) {
-                result.appendFormat("  Error reading static information!\n");
-                write(fd, result.string(), result.size());
+                dprintf(fd, "  Error reading static information!\n");
             } else {
-                result.appendFormat("  Facing: %s\n",
+                dprintf(fd, "  Facing: %s\n",
                         info.facing == CAMERA_FACING_BACK ? "BACK" :
-                                info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
-                result.appendFormat("  Orientation: %d\n", info.orientation);
-                int deviceVersion;
+                        info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
+                dprintf(fd, "  Orientation: %d\n", info.orientation);
+
                 if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
                     deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
                 } else {
                     deviceVersion = info.device_version;
                 }
-
-                auto conflicting = state.second->getConflicting();
-                result.appendFormat("  Resource Cost: %d\n", state.second->getCost());
-                result.appendFormat("  Conflicting Devices:");
-                for (auto& id : conflicting) {
-                    result.appendFormat(" %s", id.string());
-                }
-                if (conflicting.size() == 0) {
-                    result.appendFormat(" NONE");
-                }
-                result.appendFormat("\n");
-
-                result.appendFormat("  Device version: %#x\n", deviceVersion);
-                if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
-                    result.appendFormat("  Device static metadata:\n");
-                    write(fd, result.string(), result.size());
-                    dump_indented_camera_metadata(info.static_camera_characteristics,
-                            fd, /*verbosity*/2, /*indentation*/4);
-                } else {
-                    write(fd, result.string(), result.size());
-                }
-
-                CameraParameters p = state.second->getShimParams();
-                if (!p.isEmpty()) {
-                    result = String8::format("  Camera1 API shim is using parameters:\n        ");
-                    write(fd, result.string(), result.size());
-                    p.dump(fd, args);
-                }
             }
 
-            auto clientDescriptor = mActiveClientManager.get(cameraId);
-            if (clientDescriptor == nullptr) {
-                result = String8::format("  Device %s is closed, no client instance\n",
-                        cameraId.string());
-                write(fd, result.string(), result.size());
-                continue;
+            auto conflicting = state.second->getConflicting();
+            dprintf(fd, "  Resource Cost: %d\n", state.second->getCost());
+            dprintf(fd, "  Conflicting Devices:");
+            for (auto& id : conflicting) {
+                dprintf(fd, " %s", id.string());
             }
-            hasClient = true;
-            result = String8::format("  Device %s is open. Client instance dump:\n\n",
-                    cameraId.string());
-            result.appendFormat("Client priority level: %d\n", clientDescriptor->getPriority());
-            result.appendFormat("Client PID: %d\n", clientDescriptor->getOwnerId());
+            if (conflicting.size() == 0) {
+                dprintf(fd, " NONE");
+            }
+            dprintf(fd, "\n");
 
-            auto client = clientDescriptor->getValue();
-            result.appendFormat("Client package: %s\n",
-                    String8(client->getPackageName()).string());
-            write(fd, result.string(), result.size());
-
-            client->dumpClient(fd, args);
+            dprintf(fd, "  Device version: %#x\n", deviceVersion);
+            if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
+                dprintf(fd, "  Device static metadata:\n");
+                dump_indented_camera_metadata(info.static_camera_characteristics,
+                        fd, /*verbosity*/2, /*indentation*/4);
+            }
         }
 
-        if (stateLocked) mCameraStatesLock.unlock();
+    }
 
-        if (!hasClient) {
-            result = String8::format("\nNo active camera clients yet.\n");
-            write(fd, result.string(), result.size());
-        }
+    if (stateLocked) mCameraStatesLock.unlock();
 
-        if (locked) mServiceLock.unlock();
+    if (locked) mServiceLock.unlock();
 
-        // Dump camera traces if there were any
-        write(fd, "\n", 1);
-        camera3::CameraTraces::dump(fd, args);
+    if (mModule == nullptr) {
+        mCameraProviderManager->dump(fd, args);
+    } else {
+        dprintf(fd, "\n== Camera Module HAL static info: ==\n");
+        dprintf(fd, "Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
+        dprintf(fd, "Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
+        dprintf(fd, "Camera module name: %s\n", mModule->getModuleName());
+        dprintf(fd, "Camera module author: %s\n", mModule->getModuleAuthor());
+    }
 
-        // Process dump arguments, if any
-        int n = args.size();
-        String16 verboseOption("-v");
-        String16 unreachableOption("--unreachable");
-        for (int i = 0; i < n; i++) {
-            if (args[i] == verboseOption) {
-                // change logging level
-                if (i + 1 >= n) continue;
-                String8 levelStr(args[i+1]);
-                int level = atoi(levelStr.string());
-                result = String8::format("\nSetting log level to %d.\n", level);
-                setLogLevel(level);
-                write(fd, result.string(), result.size());
-            } else if (args[i] == unreachableOption) {
-                // Dump memory analysis
-                // TODO - should limit be an argument parameter?
-                UnreachableMemoryInfo info;
-                bool success = GetUnreachableMemory(info, /*limit*/ 10000);
-                if (!success) {
-                    dprintf(fd, "\nUnable to dump unreachable memory. "
-                            "Try disabling SELinux enforcement.\n");
-                } else {
-                    dprintf(fd, "\nDumping unreachable memory:\n");
-                    std::string s = info.ToString(/*log_contents*/ true);
-                    write(fd, s.c_str(), s.size());
-                }
+    dprintf(fd, "\n== Vendor tags: ==\n\n");
+
+    sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    if (desc == NULL) {
+        dprintf(fd, "No vendor tags.\n");
+    } else {
+        desc->dump(fd, /*verbosity*/2, /*indentation*/2);
+    }
+
+    // Dump camera traces if there were any
+    dprintf(fd, "\n");
+    camera3::CameraTraces::dump(fd, args);
+
+    // Process dump arguments, if any
+    int n = args.size();
+    String16 verboseOption("-v");
+    String16 unreachableOption("--unreachable");
+    for (int i = 0; i < n; i++) {
+        if (args[i] == verboseOption) {
+            // change logging level
+            if (i + 1 >= n) continue;
+            String8 levelStr(args[i+1]);
+            int level = atoi(levelStr.string());
+            dprintf(fd, "\nSetting log level to %d.\n", level);
+            setLogLevel(level);
+        } else if (args[i] == unreachableOption) {
+            // Dump memory analysis
+            // TODO - should limit be an argument parameter?
+            UnreachableMemoryInfo info;
+            bool success = GetUnreachableMemory(info, /*limit*/ 10000);
+            if (!success) {
+                dprintf(fd, "\n== Unable to dump unreachable memory. "
+                        "Try disabling SELinux enforcement. ==\n");
+            } else {
+                dprintf(fd, "\n== Dumping unreachable memory: ==\n");
+                std::string s = info.ToString(/*log_contents*/ true);
+                write(fd, s.c_str(), s.size());
             }
         }
     }
@@ -2917,21 +2909,19 @@
 }
 
 void CameraService::dumpEventLog(int fd) {
-    String8 result = String8("\nPrior client events (most recent at top):\n");
+    dprintf(fd, "\n== Camera service events log (most recent at top): ==\n");
 
     Mutex::Autolock l(mLogLock);
     for (const auto& msg : mEventLog) {
-        result.appendFormat("  %s\n", msg.string());
+        dprintf(fd, "  %s\n", msg.string());
     }
 
     if (mEventLog.size() == DEFAULT_EVENT_LOG_LENGTH) {
-        result.append("  ...\n");
+        dprintf(fd, "  ...\n");
     } else if (mEventLog.size() == 0) {
-        result.append("  [no events yet]\n");
+        dprintf(fd, "  [no events yet]\n");
     }
-    result.append("\n");
-
-    write(fd, result.string(), result.size());
+    dprintf(fd, "\n");
 }
 
 void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index d490119..2618838 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -152,6 +152,7 @@
     }
 
     List<const CameraMetadata> metadataRequestList;
+    std::list<const SurfaceMap> surfaceMapList;
     submitInfo->mRequestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
 
@@ -191,11 +192,11 @@
         }
 
         /**
-         * Write in the output stream IDs which we calculate from
-         * the capture request's list of surface targets
+         * Write in the output stream IDs and map from stream ID to surface ID
+         * which we calculate from the capture request's list of surface targets
          */
+        SurfaceMap surfaceMap;
         Vector<int32_t> outputStreamIds;
-        outputStreamIds.setCapacity(request.mSurfaceList.size());
         for (sp<Surface> surface : request.mSurfaceList) {
             if (surface == 0) continue;
 
@@ -211,10 +212,16 @@
                         "Request targets Surface that is not part of current capture session");
             }
 
-            int streamId = mStreamMap.valueAt(idx);
-            outputStreamIds.push_back(streamId);
-            ALOGV("%s: Camera %s: Appending output stream %d to request",
-                    __FUNCTION__, mCameraIdStr.string(), streamId);
+            const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+            if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
+                surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
+                outputStreamIds.push_back(streamSurfaceId.streamId());
+            }
+            surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+            ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+                    __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+                    streamSurfaceId.surfaceId());
         }
 
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
@@ -231,11 +238,13 @@
                 loopCounter, requests.size());
 
         metadataRequestList.push_back(metadata);
+        surfaceMapList.push_back(surfaceMap);
     }
     mRequestIdCounter++;
 
     if (streaming) {
-        err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
                 "Camera %s:  Got error %s (%d) after trying to set streaming request",
@@ -248,7 +257,8 @@
             mStreamingRequestId = submitInfo->mRequestId;
         }
     } else {
-        err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->captureList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
                 "Camera %s: Got error %s (%d) after trying to submit capture request",
@@ -312,8 +322,9 @@
 }
 
 binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
-    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
-            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+    ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
+            __FUNCTION__, mInputStream.configured ? 1 : 0,
+            mStreamMap.size());
 
     binder::Status res;
     if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -376,7 +387,7 @@
     }
 
     bool isInput = false;
-    ssize_t index = NAME_NOT_FOUND;
+    std::vector<sp<IBinder>> surfaces;
     ssize_t dIndex = NAME_NOT_FOUND;
 
     if (mInputStream.configured && mInputStream.id == streamId) {
@@ -384,26 +395,24 @@
     } else {
         // Guard against trying to delete non-created streams
         for (size_t i = 0; i < mStreamMap.size(); ++i) {
-            if (streamId == mStreamMap.valueAt(i)) {
-                index = i;
+            if (streamId == mStreamMap.valueAt(i).streamId()) {
+                surfaces.push_back(mStreamMap.keyAt(i));
+            }
+        }
+
+        // See if this stream is one of the deferred streams.
+        for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+            if (streamId == mDeferredStreams[i]) {
+                dIndex = i;
                 break;
             }
         }
 
-        if (index == NAME_NOT_FOUND) {
-            // See if this stream is one of the deferred streams.
-            for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
-                if (streamId == mDeferredStreams[i]) {
-                    dIndex = i;
-                    break;
-                }
-            }
-            if (dIndex == NAME_NOT_FOUND) {
-                String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
-                        " stream created yet", mCameraIdStr.string(), streamId);
-                ALOGW("%s: %s", __FUNCTION__, msg.string());
-                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
-            }
+        if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
+                    " stream created yet", mCameraIdStr.string(), streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
     }
 
@@ -418,10 +427,14 @@
     } else {
         if (isInput) {
             mInputStream.configured = false;
-        } else if (index != NAME_NOT_FOUND) {
-            mStreamMap.removeItemsAt(index);
         } else {
-            mDeferredStreams.removeItemsAt(dIndex);
+            for (auto& surface : surfaces) {
+                mStreamMap.removeItem(surface);
+            }
+
+            if (dIndex != NAME_NOT_FOUND) {
+                mDeferredStreams.removeItemsAt(dIndex);
+            }
         }
     }
 
@@ -439,14 +452,25 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
-    bool deferredConsumer = bufferProducer == NULL;
+    const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
+    size_t numBufferProducers = bufferProducers.size();
+    bool deferredConsumer = outputConfiguration.isDeferred();
+    bool isShared = outputConfiguration.isShared();
+
+    if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+        ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+              __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+    }
+    bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
     int surfaceType = outputConfiguration.getSurfaceType();
     bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
             (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
     if (deferredConsumer && !validSurfaceType) {
         ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
-                __FUNCTION__, bufferProducer.get(), surfaceType);
+                __FUNCTION__, bufferProducers[0].get(), surfaceType);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
 
@@ -454,116 +478,67 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
-    int width, height, format;
-    int32_t consumerUsage;
-    android_dataspace dataSpace;
+    std::vector<sp<Surface>> surfaces;
+    std::vector<sp<IBinder>> binders;
     status_t err;
 
     // Create stream for deferred surface case.
-    if (deferredConsumer) {
-        return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
+    if (deferredConsumerOnly) {
+        return createDeferredSurfaceStreamLocked(outputConfiguration, isShared, newStreamId);
     }
 
-    // Don't create multiple streams for the same target surface
-    {
-        ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+    OutputStreamInfo streamInfo;
+    bool isStreamInfoValid = false;
+    for (auto& bufferProducer : bufferProducers) {
+        // Don't create multiple streams for the same target surface
+        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+        ssize_t index = mStreamMap.indexOfKey(binder);
         if (index != NAME_NOT_FOUND) {
             String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
                     "(ID %zd)", mCameraIdStr.string(), index);
             ALOGW("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
         }
-    }
 
-    // HACK b/10949105
-    // Query consumer usage bits to set async operation mode for
-    // GLConsumer using controlledByApp parameter.
-    bool useAsync = false;
-    if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
-            &consumerUsage)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
-        ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
-                __FUNCTION__, mCameraIdStr.string(), consumerUsage);
-        useAsync = true;
-    }
+        sp<Surface> surface;
+        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
 
-    int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
-                              GRALLOC_USAGE_RENDERSCRIPT;
-    int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
-                           GraphicBuffer::USAGE_HW_TEXTURE |
-                           GraphicBuffer::USAGE_HW_COMPOSER;
-    bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
-            (consumerUsage & allowedFlags) != 0;
+        if (!res.isOk())
+            return res;
 
-    sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-    sp<Surface> surface = new Surface(bufferProducer, useAsync);
-    ANativeWindow *anw = surface.get();
+        if (!isStreamInfoValid) {
+            isStreamInfoValid = true;
+        }
 
-    if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
-                            reinterpret_cast<int*>(&dataSpace))) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-
-    // FIXME: remove this override since the default format should be
-    //       IMPLEMENTATION_DEFINED. b/9487482
-    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
-        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
-        ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
-              __FUNCTION__, mCameraIdStr.string(), format);
-        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-    }
-
-    // Round dimensions to the nearest dimensions available for this format
-    if (flexibleConsumer && isPublicFormat(format) &&
-            !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
-        String8 msg = String8::format("Camera %s: No supported stream configurations with "
-                "format %#x defined, failed to create output stream", mCameraIdStr.string(), format);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        binders.push_back(IInterface::asBinder(bufferProducer));
+        surfaces.push_back(surface);
     }
 
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(surface, width, height, format, dataSpace,
+    err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+            streamInfo.height, streamInfo.format, streamInfo.dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
-            &streamId, outputConfiguration.getSurfaceSetID());
+            &streamId, outputConfiguration.getSurfaceSetID(), isShared);
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
                 "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
-                mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
+                mCameraIdStr.string(), streamInfo.width, streamInfo.height, streamInfo.format,
+                streamInfo.dataSpace, strerror(-err), err);
     } else {
-        mStreamMap.add(binder, streamId);
+        int i = 0;
+        for (auto& binder : binders) {
+            ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
+                    __FUNCTION__, binder.get(), streamId, i);
+            mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+        }
+
+        mStreamInfoMap[streamId] = streamInfo;
 
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
-                " (%d x %d) with format 0x%x.",
-              __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
+                    " (%d x %d) with format 0x%x.",
+                  __FUNCTION__, mCameraIdStr.string(), streamId, streamInfo.width,
+                  streamInfo.height, streamInfo.format);
 
         // Set transform flags to ensure preview to be rotated correctly.
         res = setStreamTransformLocked(streamId);
@@ -576,6 +551,7 @@
 
 binder::Status CameraDeviceClient::createDeferredSurfaceStreamLocked(
         const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+        bool isShared,
         /*out*/
         int* newStreamId) {
     int width, height, format, surfaceType;
@@ -600,9 +576,11 @@
         consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
     }
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+    std::vector<sp<Surface>> noSurface;
+    err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
+            height, format, dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
-            &streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
+            &streamId, outputConfiguration.getSurfaceSetID(), isShared, consumerUsage);
 
     if (err != OK) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -614,6 +592,9 @@
         // relocated to mStreamMap.
         mDeferredStreams.push_back(streamId);
 
+        mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
+                std::forward_as_tuple(width, height, format, dataSpace, consumerUsage));
+
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
                 " (%d x %d) stream with format 0x%x.",
               __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
@@ -756,6 +737,140 @@
     }
 }
 
+binder::Status CameraDeviceClient::createSurfaceFromGbp(
+        OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+
+    // bufferProducer must be non-null
+    if (gbp == nullptr) {
+        String8 msg = String8::format("Camera %s: Surface is NULL", mCameraIdStr.string());
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    // HACK b/10949105
+    // Query consumer usage bits to set async operation mode for
+    // GLConsumer using controlledByApp parameter.
+    bool useAsync = false;
+    int32_t consumerUsage;
+    status_t err;
+    if ((err = gbp->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+            &consumerUsage)) != OK) {
+        String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+    if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+        ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
+                __FUNCTION__, mCameraIdStr.string(), consumerUsage);
+        useAsync = true;
+    }
+
+    int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+                              GRALLOC_USAGE_RENDERSCRIPT;
+    int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+                           GraphicBuffer::USAGE_HW_TEXTURE |
+                           GraphicBuffer::USAGE_HW_COMPOSER;
+    bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+            (consumerUsage & allowedFlags) != 0;
+
+    surface = new Surface(gbp, useAsync);
+    ANativeWindow *anw = surface.get();
+
+    int width, height, format;
+    android_dataspace dataSpace;
+    if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+        String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+                 mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+    if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+        String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+        String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+            reinterpret_cast<int*>(&dataSpace))) != OK) {
+        String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+                mCameraIdStr.string(), strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    }
+
+    // FIXME: remove this override since the default format should be
+    //       IMPLEMENTATION_DEFINED. b/9487482
+    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+        ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+                __FUNCTION__, mCameraIdStr.string(), format);
+        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    }
+    // Round dimensions to the nearest dimensions available for this format
+    if (flexibleConsumer && isPublicFormat(format) &&
+            !CameraDeviceClient::roundBufferDimensionNearest(width, height,
+            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+        String8 msg = String8::format("Camera %s: No supported stream configurations with "
+                "format %#x defined, failed to create output stream",
+                mCameraIdStr.string(), format);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+
+    if (!isStreamInfoValid) {
+        streamInfo.width = width;
+        streamInfo.height = height;
+        streamInfo.format = format;
+        streamInfo.dataSpace = dataSpace;
+        streamInfo.consumerUsage = consumerUsage;
+        return binder::Status::ok();
+    }
+    if (width != streamInfo.width) {
+        String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+                mCameraIdStr.string(), width, streamInfo.width);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    if (height != streamInfo.height) {
+        String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+                 mCameraIdStr.string(), height, streamInfo.height);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    if (format != streamInfo.format) {
+        String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+                 mCameraIdStr.string(), format, streamInfo.format);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    if (dataSpace != streamInfo.dataSpace) {
+        String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+                 mCameraIdStr.string(), dataSpace, streamInfo.dataSpace);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    // On the native side there is no way to check whether two surfaces come from the
+    // same Surface class type, so use the usage flags to approximate the comparison,
+    // treating the different preview surface usage flags as equivalent.
+    int32_t previewUsageMask =
+            GraphicBuffer::USAGE_HW_TEXTURE | GraphicBuffer::USAGE_HW_COMPOSER;
+    if ((consumerUsage & ~previewUsageMask) != (streamInfo.consumerUsage & ~previewUsageMask)) {
+        String8 msg = String8::format(
+                "Camera %s:Surface usage flag doesn't match 0x%x vs 0x%x",
+                mCameraIdStr.string(), consumerUsage, streamInfo.consumerUsage);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+    return binder::Status::ok();
+}
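
As a rough standalone illustration (with a simplified StreamInfo struct and made-up usage constants, not the real GraphicBuffer flags), the validate-or-populate pattern used by createSurfaceFromGbp above boils down to: the first surface fills in the stream info, and every later surface must match it, with the preview-only usage bits masked out of the comparison.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the OutputStreamInfo fields and usage bits.
    struct StreamInfo {
        int width;
        int height;
        int format;
        uint32_t usage;
    };

    constexpr uint32_t kUsageTexture  = 0x1;   // stand-in for USAGE_HW_TEXTURE
    constexpr uint32_t kUsageComposer = 0x2;   // stand-in for USAGE_HW_COMPOSER
    constexpr uint32_t kPreviewMask   = kUsageTexture | kUsageComposer;

    // First surface populates the stream info; every later surface must match it,
    // comparing usage bits with the preview-only flags masked out.
    bool validateOrPopulate(StreamInfo& info, bool infoValid,
                            int w, int h, int fmt, uint32_t usage) {
        if (!infoValid) {
            info.width = w; info.height = h; info.format = fmt; info.usage = usage;
            return true;
        }
        if (w != info.width || h != info.height || fmt != info.format) return false;
        return (usage & ~kPreviewMask) == (info.usage & ~kPreviewMask);
    }

    int main() {
        StreamInfo info{};
        bool first  = validateOrPopulate(info, false, 1920, 1080, 0x22, kUsageTexture);
        bool second = validateOrPopulate(info, true,  1920, 1080, 0x22, kUsageComposer);
        bool bad    = validateOrPopulate(info, true,  1280,  720, 0x22, kUsageTexture);
        printf("%d %d %d\n", first, second, bad);   // prints: 1 1 0
        return 0;
    }
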
+
 bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
         int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
         /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
@@ -944,7 +1059,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -984,7 +1099,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -1032,7 +1147,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -1061,7 +1176,7 @@
     return res;
 }
 
-binder::Status CameraDeviceClient::setDeferredConfiguration(int32_t streamId,
+binder::Status CameraDeviceClient::finalizeOutputConfigurations(int32_t streamId,
         const hardware::camera2::params::OutputConfiguration &outputConfiguration) {
     ATRACE_CALL();
 
@@ -1070,22 +1185,32 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+    const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
 
-    // Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
-    if (bufferProducer == NULL) {
-        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+    if (bufferProducers.size() == 0) {
+        ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
-    // Check if this stram id is one of the deferred streams
-    ssize_t index = NAME_NOT_FOUND;
-    for (size_t i = 0; i < mDeferredStreams.size(); i++) {
-        if (streamId == mDeferredStreams[i]) {
-            index = i;
+
+    // streamId should be in mStreamMap if this stream already has a surface attached
+    // to it. Otherwise, it should be in mDeferredStreams.
+    bool streamIdConfigured = false;
+    ssize_t deferredStreamIndex = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mStreamMap.size(); i++) {
+        if (mStreamMap.valueAt(i).streamId() == streamId) {
+            streamIdConfigured = true;
             break;
         }
     }
-    if (index == NAME_NOT_FOUND) {
+    for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+        if (streamId == mDeferredStreams[i]) {
+            deferredStreamIndex = i;
+            break;
+        }
+    }
+    if (deferredStreamIndex == NAME_NOT_FOUND && !streamIdConfigured) {
         String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
                 "(ID %d)", mCameraIdStr.string(), streamId);
         ALOGW("%s: %s", __FUNCTION__, msg.string());
@@ -1096,28 +1221,52 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
-    // Don't create multiple streams for the same target surface
-    {
+    std::vector<sp<Surface>> consumerSurfaces;
+    std::vector<size_t> consumerSurfaceIds;
+    size_t surfaceId = 0;
+    for (auto& bufferProducer : bufferProducers) {
+        // Don't create multiple streams for the same target surface
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
         if (index != NAME_NOT_FOUND) {
-            String8 msg = String8::format("Camera %s: Surface already has a stream created "
+            ALOGV("Camera %s: Surface already has a stream created "
                     " for it (ID %zd)", mCameraIdStr.string(), index);
-            ALOGW("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+            surfaceId++;
+            continue;
         }
+
+        sp<Surface> surface;
+        res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
+                surface, bufferProducer);
+
+        if (!res.isOk())
+            return res;
+
+        consumerSurfaces.push_back(surface);
+        consumerSurfaceIds.push_back(surfaceId);
+        surfaceId++;
     }
 
-    status_t err;
-
-    // Always set to async, as we know the deferred surface is for preview streaming.
-    sp<Surface> consumerSurface = new Surface(bufferProducer, /*useAsync*/true);
+    if (consumerSurfaces.size() == 0) {
+        String8 msg = String8::format("Camera %s: New OutputConfiguration has the same surfaces"
+                " for stream (ID %d)", mCameraIdStr.string(), streamId);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
 
     // Finish the deferred stream configuration with the surface.
-    err = mDevice->setConsumerSurface(streamId, consumerSurface);
+    status_t err;
+    err = mDevice->setConsumerSurfaces(streamId, consumerSurfaces);
     if (err == OK) {
-        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-        mStreamMap.add(binder, streamId);
-        mDeferredStreams.removeItemsAt(index);
+        for (size_t i = 0; i < consumerSurfaces.size(); i++) {
+            sp<IBinder> binder = IInterface::asBinder(
+                    consumerSurfaces[i]->getIGraphicBufferProducer());
+            ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+                    binder.get(), streamId, consumerSurfaceIds[i]);
+            mStreamMap.add(binder, StreamSurfaceId(streamId, consumerSurfaceIds[i]));
+        }
+        if (deferredStreamIndex != NAME_NOT_FOUND) {
+            mDeferredStreams.removeItemsAt(deferredStreamIndex);
+        }
     } else if (err == NO_INIT) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
                 "Camera %s: Deferred surface is invalid: %s (%d)",
@@ -1136,35 +1285,34 @@
 }
 
 status_t CameraDeviceClient::dumpClient(int fd, const Vector<String16>& args) {
-    String8 result;
-    result.appendFormat("CameraDeviceClient[%s] (%p) dump:\n",
+    dprintf(fd, "  CameraDeviceClient[%s] (%p) dump:\n",
             mCameraIdStr.string(),
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL) );
-    result.appendFormat("  Current client UID %u\n", mClientUid);
+    dprintf(fd, "    Current client UID %u\n", mClientUid);
 
-    result.append("  State:\n");
-    result.appendFormat("    Request ID counter: %d\n", mRequestIdCounter);
+    dprintf(fd, "    State:\n");
+    dprintf(fd, "      Request ID counter: %d\n", mRequestIdCounter);
     if (mInputStream.configured) {
-        result.appendFormat("    Current input stream ID: %d\n",
-                    mInputStream.id);
+        dprintf(fd, "      Current input stream ID: %d\n", mInputStream.id);
     } else {
-        result.append("    No input stream configured.\n");
+        dprintf(fd, "      No input stream configured.\n");
     }
     if (!mStreamMap.isEmpty()) {
-        result.append("    Current output stream IDs:\n");
+        dprintf(fd, "      Current output stream/surface IDs:\n");
         for (size_t i = 0; i < mStreamMap.size(); i++) {
-            result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
+            dprintf(fd, "        Stream %d Surface %d\n",
+                                mStreamMap.valueAt(i).streamId(),
+                                mStreamMap.valueAt(i).surfaceId());
         }
     } else if (!mDeferredStreams.isEmpty()) {
-        result.append("    Current deferred surface output stream IDs:\n");
+        dprintf(fd, "      Current deferred surface output stream IDs:\n");
         for (auto& streamId : mDeferredStreams) {
-            result.appendFormat("      Stream %d\n", streamId);
+            dprintf(fd, "        Stream %d\n", streamId);
         }
     } else {
-        result.append("    No output streams configured.\n");
+        dprintf(fd, "      No output streams configured.\n");
     }
-    write(fd, result.string(), result.size());
     // TODO: print dynamic/request section from most recent requests
     mFrameProcessor->dump(fd, args);
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 2226dd2..2f6d414 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -131,8 +131,8 @@
     // Prepare stream by preallocating up to maxCount of its buffers
     virtual binder::Status prepare2(int32_t maxCount, int32_t streamId);
 
-    // Set the deferred surface for a stream.
-    virtual binder::Status setDeferredConfiguration(int32_t streamId,
+    // Finalize the output configurations with surfaces not added before.
+    virtual binder::Status finalizeOutputConfigurations(int32_t streamId,
             const hardware::camera2::params::OutputConfiguration &outputConfiguration);
 
     /**
@@ -180,6 +180,51 @@
     status_t              getRotationTransformLocked(/*out*/int32_t* transform);
 
 private:
+    // StreamSurfaceId encapsulates the streamId + surfaceId of a particular surface.
+    // streamId identifies the stream the surface belongs to, and surfaceId is the
+    // index of the surface within that stream (one stream can contain multiple
+    // surfaces).
+    class StreamSurfaceId final {
+    public:
+        StreamSurfaceId() {
+            mStreamId = -1;
+            mSurfaceId = -1;
+        }
+        StreamSurfaceId(int32_t streamId, int32_t surfaceId) {
+            mStreamId = streamId;
+            mSurfaceId = surfaceId;
+        }
+        int32_t streamId() const {
+            return mStreamId;
+        }
+        int32_t surfaceId() const {
+            return mSurfaceId;
+        }
+
+    private:
+        int32_t mStreamId;
+        int32_t mSurfaceId;
+
+    }; // class StreamSurfaceId
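
A small standalone sketch (plain std containers instead of KeyedVector and IBinder keys) of how a binder -> StreamSurfaceId map can be scanned for every surface that belongs to one stream, the same pattern the patch uses when preparing or deleting a stream:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct StreamSurfaceId {
        int32_t streamId;
        int32_t surfaceId;
    };

    int main() {
        // The key would be the producer's IBinder in the real map; an int stands in here.
        std::vector<std::pair<int, StreamSurfaceId>> streamMap;
        streamMap.push_back({101, {7, 0}});
        streamMap.push_back({102, {7, 1}});
        streamMap.push_back({103, {9, 0}});

        int32_t wanted = 7;
        for (const auto& entry : streamMap) {
            if (entry.second.streamId == wanted) {
                printf("binder %d -> stream %d surface %d\n",
                       entry.first, entry.second.streamId, entry.second.surfaceId);
            }
        }
        return 0;
    }
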
+
+    // OutputStreamInfo describes the properties of a camera stream.
+    class OutputStreamInfo {
+    public:
+        int width;
+        int height;
+        int format;
+        android_dataspace dataSpace;
+        int32_t consumerUsage;
+        OutputStreamInfo() :
+                width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
+                consumerUsage(0) {}
+        OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
+                int32_t _consumerUsage) :
+                    width(_width), height(_height), format(_format),
+                    dataSpace(_dataSpace), consumerUsage(_consumerUsage) {}
+    };
+
+private:
     /** ICameraDeviceUser interface-related private members */
 
     /** Preview callback related members */
@@ -200,6 +245,7 @@
     // Create an output stream with surface deferred for future.
     binder::Status createDeferredSurfaceStreamLocked(
             const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+            bool isShared,
             int* newStreamId = NULL);
 
     // Set the stream transform flags to automatically rotate the camera stream for preview use
@@ -216,8 +262,13 @@
     //check if format is not custom format
     static bool isPublicFormat(int32_t format);
 
-    // IGraphicsBufferProducer binder -> Stream ID for output streams
-    KeyedVector<sp<IBinder>, int> mStreamMap;
+    // Create a Surface from an IGraphicBufferProducer. Returns an error if the
+    // IGraphicBufferProducer's properties don't match streamInfo.
+    binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+            sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+
+    // IGraphicBufferProducer binder -> Stream ID + Surface ID for output streams
+    KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
 
     struct InputStreamConfiguration {
         bool configured;
@@ -238,6 +289,11 @@
     // as there are no surfaces available and can not be put into mStreamMap. Once the deferred
     // Surface is configured, the stream id will be moved to mStreamMap.
     Vector<int32_t> mDeferredStreams;
+
+    // stream ID -> outputStreamInfo mapping
+    std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+
+    static const int32_t MAX_SURFACES_PER_STREAM = 2;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 40b368e..98a3fcc 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 #define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 
+#include <list>
+
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/String16.h>
@@ -37,6 +39,9 @@
 
 class CameraProviderManager;
 
+// Mapping of output stream ID to the IDs of the surfaces requested from that stream
+typedef std::unordered_map<int, std::vector<size_t> > SurfaceMap;
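
For illustration, a standalone sketch of the SurfaceMap shape assumed here: one entry per output stream ID, whose vector holds the indices of the surfaces within that stream that a request targets (a shared stream can list more than one):

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    typedef std::unordered_map<int, std::vector<size_t>> SurfaceMap;

    int main() {
        SurfaceMap surfaceMap;
        surfaceMap[0].push_back(0);     // stream 0: single consumer
        surfaceMap[1] = {0, 1};         // stream 1: shared, two surfaces requested
        for (const auto& entry : surfaceMap) {
            printf("stream %d -> %zu surface(s)\n", entry.first, entry.second.size());
        }
        return 0;
    }
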
+
 /**
  * Base interface for version >= 2 camera device classes, which interface to
  * camera HAL device versions >= 2.
@@ -73,6 +78,7 @@
      * Output lastFrameNumber is the expected last frame number of the list of requests.
      */
     virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 const std::list<const SurfaceMap> &surfaceMaps,
                                  int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -88,6 +94,7 @@
      * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
     virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             const std::list<const SurfaceMap> &surfaceMaps,
                                              int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -114,7 +121,20 @@
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
-            uint32_t consumerUsage = 0) = 0;
+            bool isShared = false, uint32_t consumerUsage = 0) = 0;
+
+    /**
+     * Create an output stream of the requested size, format, rotation and
+     * dataspace with a number of consumers.
+     *
+     * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+     * logical dimensions of the buffer, not the number of bytes.
+     */
+    virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            bool isShared = false, uint32_t consumerUsage = 0) = 0;
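
Purely as an illustration (the device and surfaces are hypothetical, and the surrounding AOSP types are assumed), a caller could configure a shared output stream with two consumers through this overload roughly as follows:

    // Hypothetical caller-side sketch; 'device', 'previewSurface' and 'secondSurface'
    // are assumed to exist already.
    std::vector<sp<Surface>> consumers = {previewSurface, secondSurface};
    int streamId = -1;
    status_t err = device->createStream(consumers, /*hasDeferredConsumer*/ false,
            /*width*/ 1920, /*height*/ 1080, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &streamId,
            camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/ true);
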
 
     /**
      * Create an input stream of width, height, and format.
@@ -321,7 +341,8 @@
     /**
      * Set the deferred consumer surface and finish the rest of the stream configuration.
      */
-    virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer) = 0;
+    virtual status_t setConsumerSurfaces(int streamId,
+            const std::vector<sp<Surface>>& consumers) = 0;
 
 };
 
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index cb965b6..f44fd08 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,6 +20,7 @@
 
 #include "CameraProviderManager.h"
 
+#include <chrono>
 #include <android/hidl/manager/1.0/IServiceManager.h>
 #include <hidl/ServiceManagement.h>
 
@@ -63,10 +64,8 @@
                 "about camera providers", __FUNCTION__);
         return INVALID_OPERATION;
     }
-
-    // Also see if there's a passthrough HAL, but let's not complain if there's not
+    // See if there's a passthrough HAL, but let's not complain if there's not
     addProvider(kLegacyProviderName, /*expected*/ false);
-
     return OK;
 }
 
@@ -189,6 +188,42 @@
     return deviceInfo->setTorchMode(enabled);
 }
 
+status_t CameraProviderManager::setUpVendorTags() {
+    // TODO (b/34275821): support aggregating vendor tags for more than one provider
+    for (auto& provider : mProviders) {
+        hardware::hidl_vec<VendorTagSection> vts;
+        Status status;
+        provider->mInterface->getVendorTags(
+            [&](auto s, const auto& vendorTagSecs) {
+                status = s;
+                if (s == Status::OK) {
+                    vts = vendorTagSecs;
+                }
+        });
+
+        if (status != Status::OK) {
+            return mapToStatusT(status);
+        }
+
+        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+
+        // Read all vendor tag definitions into a descriptor
+        sp<VendorTagDescriptor> desc;
+        status_t res;
+        if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/desc))
+                != OK) {
+            ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+                  "received error %s (%d). Camera clients will not be able to use"
+                  "vendor tags", __FUNCTION__, strerror(res), res);
+            return res;
+        }
+
+        // Set the global descriptor to use with camera metadata
+        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+    }
+    return OK;
+}
+
 status_t CameraProviderManager::openSession(const std::string &id,
         const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
         /*out*/
@@ -247,7 +282,6 @@
 status_t CameraProviderManager::dump(int fd, const Vector<String16>& args) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
-    dprintf(fd, "Available camera providers and devices:\n");
     for (auto& provider : mProviders) {
         provider->dump(fd, args);
     }
@@ -281,12 +315,11 @@
             mServiceProxy->getService(newProvider);
 
     if (interface == nullptr) {
+        ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+                newProvider.c_str());
         if (expected) {
-            ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
-                    newProvider.c_str());
             return BAD_VALUE;
         } else {
-            // Not guaranteed to be found, so not an error if it wasn't
             return OK;
         }
     }
@@ -334,7 +367,8 @@
         ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
         return BAD_VALUE;
     }
-    ALOGI("Connecting to new camera provider: %s", mProviderName.c_str());
+    ALOGI("Connecting to new camera provider: %s, isRemote? %d",
+            mProviderName.c_str(), mInterface->isRemote());
     Status status = mInterface->setCallback(this);
     if (status != Status::OK) {
         ALOGE("%s: Unable to register callbacks with camera provider '%s'",
@@ -427,18 +461,46 @@
 }
 
 status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
-    dprintf(fd, "    %s: %zu devices:\n", mProviderName.c_str(), mDevices.size());
+    dprintf(fd, "== Camera Provider HAL %s (v2.4) static info: %zu devices: ==\n",
+            mProviderName.c_str(), mDevices.size());
 
     for (auto& device : mDevices) {
-        dprintf(fd, "        %s: Resource cost: %d\n", device->mName.c_str(),
-                device->mResourceCost.resourceCost);
-        if (device->mResourceCost.conflictingDevices.size() > 0) {
-            dprintf(fd, "            Conflicting devices:\n");
+        dprintf(fd, "== Camera HAL device %s (v%d.%d) static information: ==\n", device->mName.c_str(),
+                device->mVersion.get_major(), device->mVersion.get_minor());
+        dprintf(fd, "  Resource cost: %d\n", device->mResourceCost.resourceCost);
+        if (device->mResourceCost.conflictingDevices.size() == 0) {
+            dprintf(fd, "  Conflicting devices: None\n");
+        } else {
+            dprintf(fd, "  Conflicting devices:\n");
             for (size_t i = 0; i < device->mResourceCost.conflictingDevices.size(); i++) {
-                dprintf(fd, "              %s\n",
+                dprintf(fd, "    %s\n",
                         device->mResourceCost.conflictingDevices[i].c_str());
             }
         }
+        dprintf(fd, "  API1 info:\n");
+        dprintf(fd, "    Has a flash unit: %s\n",
+                device->hasFlashUnit() ? "true" : "false");
+        hardware::CameraInfo info;
+        status_t res = device->getCameraInfo(&info);
+        if (res != OK) {
+            dprintf(fd, "   <Error reading camera info: %s (%d)>\n",
+                    strerror(-res), res);
+        } else {
+            dprintf(fd, "    Facing: %s\n",
+                    info.facing == hardware::CAMERA_FACING_BACK ? "Back" : "Front");
+            dprintf(fd, "    Orientation: %d\n", info.orientation);
+        }
+        CameraMetadata info2;
+        res = device->getCameraCharacteristics(&info2);
+        if (res == INVALID_OPERATION) {
+            dprintf(fd, "  API2 not directly supported\n");
+        } else if (res != OK) {
+            dprintf(fd, "  <Error reading camera characteristics: %s (%d)>\n",
+                    strerror(-res), res);
+        } else {
+            dprintf(fd, "  API2 camera characteristics:\n");
+            info2.dump(fd, /*verbosity*/ 2, /*indentation*/ 4);
+        }
     }
     return OK;
 }
@@ -971,4 +1033,94 @@
     return "UNKNOWN_STATUS";
 }
 
+
+status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
+        const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+        /*out*/
+        sp<VendorTagDescriptor>& descriptor) {
+
+    int tagCount = 0;
+
+    for (size_t s = 0; s < vts.size(); s++) {
+        tagCount += vts[s].tags.size();
+    }
+
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor tag sections is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+    sp<HidlVendorTagDescriptor> desc = new HidlVendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    SortedVector<String8> sections;
+    KeyedVector<uint32_t, String8> tagToSectionMap;
+
+    int idx = 0;
+    for (size_t s = 0; s < vts.size(); s++) {
+        const hardware::camera::common::V1_0::VendorTagSection& section = vts[s];
+        const char *sectionName = section.sectionName.c_str();
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
+            return BAD_VALUE;
+        }
+        String8 sectionString(sectionName);
+        sections.add(sectionString);
+
+        for (size_t j = 0; j < section.tags.size(); j++) {
+            uint32_t tag = section.tags[j].tagId;
+            if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+                ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+
+            tagArray.editItemAt(idx++) = section.tags[j].tagId;
+
+            const char *tagName = section.tags[j].tagName.c_str();
+            if (tagName == NULL) {
+                ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+            desc->mTagToNameMap.add(tag, String8(tagName));
+            tagToSectionMap.add(tag, sectionString);
+
+            int tagType = (int) section.tags[j].tagType;
+            if (tagType < 0 || tagType >= NUM_TYPES) {
+                ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+                return BAD_VALUE;
+            }
+            desc->mTagToTypeMap.add(tag, tagType);
+        }
+    }
+
+    desc->mSections = sections;
+
+    for (size_t i = 0; i < tagArray.size(); ++i) {
+        uint32_t tag = tagArray[i];
+        String8 sectionString = tagToSectionMap.valueFor(tag);
+
+        // Set up tag to section index map
+        ssize_t index = sections.indexOf(sectionString);
+        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+        // Set up reverse mapping
+        ssize_t reverseIndex = -1;
+        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+        }
+        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+    }
+
+    descriptor = desc;
+    return OK;
+}
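
A standalone sketch (plain std containers and hypothetical vendor tag values) of the two-level lookup structure createDescriptorFromHidl builds above: a tag -> section-index map for forward lookups, plus a per-section name -> tag map for reverse lookups:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // Inputs as the descriptor sees them: tag -> name and tag -> section name.
        // Tag values are hypothetical, chosen above the vendor tag boundary.
        std::map<uint32_t, std::string> tagToName = {
            {0x80000000u, "exposureMode"}, {0x80000001u, "sceneDetect"}};
        std::map<uint32_t, std::string> tagToSection = {
            {0x80000000u, "vendor.control"}, {0x80000001u, "vendor.control"}};

        std::vector<std::string> sections;                  // section index table
        std::map<uint32_t, size_t> tagToSectionIdx;         // forward mapping
        std::map<std::string, std::map<std::string, uint32_t>> reverse;  // section -> name -> tag

        for (const auto& entry : tagToSection) {
            uint32_t tag = entry.first;
            const std::string& section = entry.second;
            size_t idx = 0;
            for (; idx < sections.size(); idx++) if (sections[idx] == section) break;
            if (idx == sections.size()) sections.push_back(section);
            tagToSectionIdx[tag] = idx;
            reverse[section][tagToName[tag]] = tag;
        }
        printf("sceneDetect -> 0x%x\n", (unsigned)reverse["vendor.control"]["sceneDetect"]);
        return 0;
    }
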
+
+
 } // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 641dab4..5ae16cd 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -29,10 +29,30 @@
 #include <android/hardware/camera/provider/2.4/ICameraProvider.h>
 //#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
+#include <camera/VendorTagDescriptor.h>
 
 namespace android {
 
 /**
+ * The vendor tag descriptor class that takes HIDL vendor tag information as
+ * input. Not part of VendorTagDescriptor class because that class is used
+ * in AIDL generated sources which don't have access to HIDL headers.
+ */
+class HidlVendorTagDescriptor : public VendorTagDescriptor {
+public:
+    /**
+     * Create a VendorTagDescriptor object from the HIDL VendorTagSection
+     * vector.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t createDescriptorFromHidl(
+            const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor);
+};
+
+/**
  * A manager for all camera providers available on an Android device.
  *
  * Responsible for enumerating providers and the individual camera devices
@@ -156,6 +176,11 @@
     status_t setTorchMode(const std::string &id, bool enabled);
 
     /**
+     * Setup vendor tags for all registered providers
+     */
+    status_t setUpVendorTags();
+
+    /**
      * Open an active session to a camera device.
      *
      * This fully powers on the camera device hardware, and returns a handle to a
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f64dc3..208dcb6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -53,6 +53,7 @@
 #include "device3/Camera3InputStream.h"
 #include "device3/Camera3ZslStream.h"
 #include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
 #include "CameraService.h"
 
 using namespace android::camera3;
@@ -749,7 +750,7 @@
     mTagMonitor.dumpMonitoredMetadata(fd);
 
     if (mInterface->valid()) {
-        lines = String8("    HAL device dump:\n");
+        lines = String8("     HAL device dump:\n");
         write(fd, lines.string(), lines.size());
         mInterface->dump(fd);
     }
@@ -792,7 +793,9 @@
 }
 
 status_t Camera3Device::convertMetadataListToRequestListLocked(
-        const List<const CameraMetadata> &metadataList, bool repeating,
+        const List<const CameraMetadata> &metadataList,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
         RequestList *requestList) {
     if (requestList == NULL) {
         CLOGE("requestList cannot be NULL.");
@@ -800,9 +803,11 @@
     }
 
     int32_t burstId = 0;
-    for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
-            it != metadataList.end(); ++it) {
-        sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+    List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+    std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+    for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+            ++metadataIt, ++surfaceMapIt) {
+        sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
         if (newRequest == 0) {
             CLOGE("Can't create capture request");
             return BAD_VALUE;
@@ -812,12 +817,12 @@
 
         // Setup burst Id and request Id
         newRequest->mResultExtras.burstId = burstId++;
-        if (it->exists(ANDROID_REQUEST_ID)) {
-            if (it->find(ANDROID_REQUEST_ID).count == 0) {
+        if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+            if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
                 CLOGE("RequestID entry exists; but must not be empty in metadata");
                 return BAD_VALUE;
             }
-            newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+            newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
         } else {
             CLOGE("RequestID does not exist in metadata");
             return BAD_VALUE;
@@ -827,6 +832,10 @@
 
         ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
     }
+    if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+        ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+        return BAD_VALUE;
+    }
 
     // Setup batch size if this is a high speed video recording request.
     if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -846,12 +855,31 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+        std::list<const SurfaceMap>& surfaceMaps,
+        const CameraMetadata& request) {
     requests.push_back(request);
-    return captureList(requests, /*lastFrameNumber*/NULL);
+
+    SurfaceMap surfaceMap;
+    camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+    // With no surface list passed in, streams and surfaces have a 1-to-1 mapping,
+    // so the surface index is 0 for each stream in the surfaceMap.
+    for (size_t i = 0; i < streams.count; i++) {
+        surfaceMap[streams.data.i32[i]].push_back(0);
+    }
+    surfaceMaps.push_back(surfaceMap);
 }
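
A standalone sketch of the default SurfaceMap that convertToRequestList builds above: with no per-stream surface list supplied by the client, every requested stream is given surface index 0:

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    typedef std::unordered_map<int, std::vector<size_t>> SurfaceMap;

    int main() {
        std::vector<int> requestedStreams = {3, 5, 7};  // stand-in for ANDROID_REQUEST_OUTPUT_STREAMS
        SurfaceMap surfaceMap;
        for (int streamId : requestedStreams) {
            surfaceMap[streamId].push_back(0);          // 1-to-1: first (and only) surface
        }
        printf("streams mapped: %zu\n", surfaceMap.size());   // prints: streams mapped: 3
        return 0;
    }
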
 
 status_t Camera3Device::submitRequestsHelper(
-        const List<const CameraMetadata> &requests, bool repeating,
+        const List<const CameraMetadata> &requests,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
         /*out*/
         int64_t *lastFrameNumber) {
     ATRACE_CALL();
@@ -866,8 +894,8 @@
 
     RequestList requestList;
 
-    res = convertMetadataListToRequestListLocked(requests, repeating,
-            /*out*/&requestList);
+    res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+            repeating, /*out*/&requestList);
     if (res != OK) {
         // error logged by previous call
         return res;
@@ -927,8 +955,7 @@
         bDst.stream = mOutputStreams.valueAt(idx)->asHalStream();
 
         buffer_handle_t *buffer;
-        res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId,
-                &buffer);
+        res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
         if (res != OK) {
             ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
                     __FUNCTION__, result.frameNumber, i, bSrc.streamId);
@@ -1035,10 +1062,11 @@
 }
 
 status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+                                    const std::list<const SurfaceMap> &surfaceMaps,
                                     int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
 }
 
 status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -1046,19 +1074,23 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
-    requests.push_back(request);
-    return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+                                   /*lastFrameNumber*/NULL);
 }
 
 status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                                const std::list<const SurfaceMap> &surfaceMaps,
                                                 int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
 }
 
 sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     status_t res;
 
     if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
@@ -1074,7 +1106,7 @@
         }
     }
 
-    sp<CaptureRequest> newRequest = createCaptureRequest(request);
+    sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
     return newRequest;
 }
 
@@ -1258,14 +1290,33 @@
 }
 
 status_t Camera3Device::createStream(sp<Surface> consumer,
-        uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId, bool isShared, uint32_t consumerUsage) {
+    ATRACE_CALL();
+
+    if (consumer == nullptr) {
+        ALOGE("%s: consumer must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    std::vector<sp<Surface>> consumers;
+    consumers.push_back(consumer);
+
+    return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+            format, dataSpace, rotation, id, streamSetId, isShared, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+        bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+        int streamSetId, bool isShared, uint32_t consumerUsage) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
     ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
-            " consumer usage 0x%x", mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
-            consumerUsage);
+            " consumer usage 0x%x, isShared %d", mId.string(), mNextStreamId, width, height, format,
+            dataSpace, rotation, consumerUsage, isShared);
 
     status_t res;
     bool wasActive = false;
@@ -1303,14 +1354,18 @@
         streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
     }
 
+    if (consumers.size() == 0 && !hasDeferredConsumer) {
+        ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+        return BAD_VALUE;
+    }
     // HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
     // which requires a consumer surface to be available.
-    if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+    if (hasDeferredConsumer && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
         ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
         return BAD_VALUE;
     }
 
-    if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+    if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
         ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
         return BAD_VALUE;
     }
@@ -1334,7 +1389,7 @@
                 return BAD_VALUE;
             }
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1343,15 +1398,19 @@
             SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
             return BAD_VALUE;
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
-    } else if (consumer == nullptr) {
+    } else if (isShared) {
+        newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+                width, height, format, consumerUsage, dataSpace, rotation,
+                mTimestampOffset, streamSetId);
+    } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     } else {
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     }
@@ -2006,14 +2065,16 @@
     }
 }
 
-status_t Camera3Device::setConsumerSurface(int streamId, sp<Surface> consumer) {
+status_t Camera3Device::setConsumerSurfaces(int streamId,
+        const std::vector<sp<Surface>>& consumers) {
     ATRACE_CALL();
-    ALOGV("%s: Camera %s: set consumer surface for stream %d", __FUNCTION__, mId.string(), streamId);
+    ALOGV("%s: Camera %s: set consumer surface for stream %d",
+            __FUNCTION__, mId.string(), streamId);
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
-    if (consumer == nullptr) {
-        CLOGE("Null consumer is passed!");
+    if (consumers.size() == 0) {
+        CLOGE("No consumer is passed!");
         return BAD_VALUE;
     }
 
@@ -2023,22 +2084,24 @@
         return idx;
     }
     sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
-    status_t res = stream->setConsumer(consumer);
+    status_t res = stream->setConsumers(consumers);
     if (res != OK) {
         CLOGE("Stream %d set consumer failed (error %d %s) ", streamId, res, strerror(-res));
         return res;
     }
 
-    if (!stream->isConfiguring()) {
-        CLOGE("Stream %d was already fully configured.", streamId);
-        return INVALID_OPERATION;
-    }
+    if (stream->isConsumerConfigurationDeferred()) {
+        if (!stream->isConfiguring()) {
+            CLOGE("Stream %d was already fully configured.", streamId);
+            return INVALID_OPERATION;
+        }
 
-    res = stream->finishConfiguration();
-    if (res != OK) {
-        SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
-                stream->getId(), strerror(-res), res);
-        return res;
+        res = stream->finishConfiguration();
+        if (res != OK) {
+            SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                   stream->getId(), strerror(-res), res);
+            return res;
+        }
     }
 
     return OK;
@@ -2049,7 +2112,7 @@
  */
 
 sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     ATRACE_CALL();
     status_t res;
 
@@ -2104,10 +2167,17 @@
                 mOutputStreams.editValueAt(idx);
 
         // It is illegal to include a deferred consumer output stream into a request
-        if (stream->isConsumerConfigurationDeferred()) {
-            CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
-                    stream->getId());
-            return NULL;
+        auto iter = surfaceMap.find(streams.data.i32[i]);
+        if (iter != surfaceMap.end()) {
+            const std::vector<size_t>& surfaces = iter->second;
+            for (const auto& surface : surfaces) {
+                if (stream->isConsumerConfigurationDeferred(surface)) {
+                    CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+                          "due to deferred consumer", stream->getId(), surface);
+                    return NULL;
+                }
+            }
+            newRequest->mOutputSurfaces[i] = surfaces;
         }
 
         // Lazy completion of stream configuration (allocation/registration)
@@ -3113,7 +3183,7 @@
         res = mHal3Device->ops->configure_streams(mHal3Device, config);
     } else {
         // Convert stream config to HIDL
-
+        std::set<int> activeStreams;
         StreamConfiguration requestedConfiguration;
         requestedConfiguration.streams.resize(config->num_streams);
         for (size_t i = 0; i < config->num_streams; i++) {
@@ -3142,7 +3212,24 @@
             dst.usage = mapToConsumerUsage(src->usage);
             dst.dataSpace = mapToHidlDataspace(src->data_space);
             dst.rotation = mapToStreamRotation((camera3_stream_rotation_t) src->rotation);
+
+            activeStreams.insert(streamId);
+            // Create Buffer ID map if necessary
+            if (mBufferIdMaps.count(streamId) == 0) {
+                mBufferIdMaps.emplace(streamId, BufferIdMap{});
+            }
         }
+        // remove BufferIdMap for deleted streams
+        for(auto it = mBufferIdMaps.begin(); it != mBufferIdMaps.end();) {
+            int streamId = it->first;
+            bool active = activeStreams.count(streamId) > 0;
+            if (!active) {
+                it = mBufferIdMaps.erase(it);
+            } else {
+                ++it;
+            }
+        }
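
The pruning loop above follows the usual erase-while-iterating idiom; a standalone sketch with plain std containers:

    #include <cstdio>
    #include <set>
    #include <unordered_map>

    int main() {
        std::unordered_map<int, int> perStreamState = {{1, 10}, {2, 20}, {3, 30}};
        std::set<int> activeStreams = {1, 3};
        for (auto it = perStreamState.begin(); it != perStreamState.end();) {
            if (activeStreams.count(it->first) == 0) {
                it = perStreamState.erase(it);   // stream was deleted; drop its state
            } else {
                ++it;
            }
        }
        printf("remaining streams: %zu\n", perStreamState.size());   // prints: 2
        return 0;
    }
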
+
         requestedConfiguration.operationMode = mapToStreamConfigurationMode(
                 (camera3_stream_configuration_mode_t) config->operation_mode);
 
@@ -3239,8 +3326,13 @@
             std::lock_guard<std::mutex> lock(mInflightLock);
             if (request->input_buffer != nullptr) {
                 int32_t streamId = Camera3Stream::cast(request->input_buffer->stream)->getId();
+                buffer_handle_t buf = *(request->input_buffer->buffer);
+                auto pair = getBufferId(buf, streamId);
+                bool isNewBuffer = pair.first;
+                uint64_t bufferId = pair.second;
                 captureRequest.inputBuffer.streamId = streamId;
-                captureRequest.inputBuffer.buffer = *(request->input_buffer->buffer);
+                captureRequest.inputBuffer.bufferId = bufferId;
+                captureRequest.inputBuffer.buffer = (isNewBuffer) ? buf : nullptr;
                 captureRequest.inputBuffer.status = BufferStatus::OK;
                 native_handle_t *acquireFence = nullptr;
                 if (request->input_buffer->acquire_fence != -1) {
@@ -3252,7 +3344,11 @@
                 captureRequest.inputBuffer.releaseFence = nullptr;
 
                 pushInflightBufferLocked(captureRequest.frameNumber, streamId,
-                        request->input_buffer->buffer);
+                        request->input_buffer->buffer,
+                        request->input_buffer->acquire_fence);
+            } else {
+                captureRequest.inputBuffer.streamId = -1;
+                captureRequest.inputBuffer.bufferId = BUFFER_ID_NO_BUFFER;
             }
 
             captureRequest.outputBuffers.resize(request->num_output_buffers);
@@ -3260,8 +3356,12 @@
                 const camera3_stream_buffer_t *src = request->output_buffers + i;
                 StreamBuffer &dst = captureRequest.outputBuffers[i];
                 int32_t streamId = Camera3Stream::cast(src->stream)->getId();
+                buffer_handle_t buf = *(src->buffer);
+                auto pair = getBufferId(buf, streamId);
+                bool isNewBuffer = pair.first;
                 dst.streamId = streamId;
-                dst.buffer = *(src->buffer);
+                dst.bufferId = pair.second;
+                dst.buffer = isNewBuffer ? buf : nullptr;
                 dst.status = BufferStatus::OK;
                 native_handle_t *acquireFence = nullptr;
                 if (src->acquire_fence != -1) {
@@ -3273,7 +3373,7 @@
                 dst.releaseFence = nullptr;
 
                 pushInflightBufferLocked(captureRequest.frameNumber, streamId,
-                        src->buffer);
+                        src->buffer, src->acquire_fence);
             }
         }
         common::V1_0::Status status = mHidlSession->processCaptureRequest(captureRequest);
@@ -3326,24 +3426,45 @@
 }
 
 status_t Camera3Device::HalInterface::pushInflightBufferLocked(
-        int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer) {
+        int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer, int acquireFence) {
     uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
-    mInflightBufferMap[key] = buffer;
+    auto pair = std::make_pair(buffer, acquireFence);
+    mInflightBufferMap[key] = pair;
     return OK;
 }
 
 status_t Camera3Device::HalInterface::popInflightBuffer(
-        int32_t frameNumber, int32_t streamId, /*out*/ buffer_handle_t **buffer) {
+        int32_t frameNumber, int32_t streamId,
+        /*out*/ buffer_handle_t **buffer) {
     std::lock_guard<std::mutex> lock(mInflightLock);
 
     uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
     auto it = mInflightBufferMap.find(key);
     if (it == mInflightBufferMap.end()) return NAME_NOT_FOUND;
-    *buffer = it->second;
+    auto pair = it->second;
+    *buffer = pair.first;
+    int acquireFence = pair.second;
+    if (acquireFence > 0) {
+        ::close(acquireFence);
+    }
     mInflightBufferMap.erase(it);
     return OK;
 }
 
+std::pair<bool, uint64_t> Camera3Device::HalInterface::getBufferId(
+        const buffer_handle_t& buf, int streamId) {
+    std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+
+    BufferIdMap& bIdMap = mBufferIdMaps.at(streamId);
+    auto it = bIdMap.find(buf);
+    if (it == bIdMap.end()) {
+        bIdMap[buf] = mNextBufferId++;
+        return std::make_pair(true, mNextBufferId - 1);
+    } else {
+        return std::make_pair(false, it->second);
+    }
+}
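The pair returned above is what decides whether the full native handle crosses the HIDL boundary: only the first sighting of a handle on a stream ships the handle itself; later requests carry just the cached ID, as in the processCaptureRequest changes earlier in this patch. A rough illustration of the expected behavior (values assumed, with mNextBufferId starting at 1):

// Illustrative only -- mirrors how the output-buffer loop above consumes the pair.
auto first  = getBufferId(handle, /*streamId*/ 0);   // {true, 1}:  send handle + ID
auto second = getBufferId(handle, /*streamId*/ 0);   // {false, 1}: send ID only
dst.bufferId = second.second;
dst.buffer   = second.first ? handle : nullptr;      // nullptr: HAL already has this ID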
+
 /**
  * RequestThread inner class methods
  */
@@ -3927,6 +4048,14 @@
                 return TIMED_OUT;
             }
             halRequest->num_output_buffers++;
+
+            res = outputStream->notifyRequestedSurfaces(halRequest->frame_number,
+                    captureRequest->mOutputSurfaces[i]);
+            if (res != OK) {
+                ALOGE("RequestThread: Cannot register output surfaces: %s (%d)",
+                      strerror(-res), res);
+                return INVALID_OPERATION;
+            }
         }
         totalNumBuffers += halRequest->num_output_buffers;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 217c8b7..91d682e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -95,10 +95,12 @@
     // idle state
     status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
     status_t captureList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequest(const CameraMetadata &request,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
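With the surface maps threaded through, a submission now pairs each metadata entry with one SurfaceMap in the same order. A hedged sketch of the resulting call shape (variable names are illustrative, not part of this change):

// Sketch of the new captureList() call shape.
List<const CameraMetadata> requests;
std::list<const SurfaceMap> surfaceMaps;
requests.push_back(settings);
surfaceMaps.push_back(surfaceMap);   // one map per request, same order
int64_t lastFrameNumber = -1;
status_t res = device->captureList(requests, surfaceMaps, &lastFrameNumber);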
 
@@ -107,13 +109,19 @@
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
     // stream, reconfiguring device, and unpausing. If the client create a stream
-    // with nullptr consumer surface, the client must then call setConsumer()
+    // with nullptr consumer surface, the client must then call setConsumers()
     // and finish the stream configuration before starting output streaming.
     status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
-            uint32_t consumerUsage = 0) override;
+            bool isShared = false, uint32_t consumerUsage = 0) override;
+    status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            bool isShared = false, uint32_t consumerUsage = 0) override;
+
     status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id) override;
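The comment above describes the deferred-consumer contract; here is a rough sketch of how a caller might exercise the new multi-surface overload together with setConsumerSurfaces() (surfaces, dimensions, and the device handle are illustrative assumptions):

// Hypothetical caller-side sketch; identifiers not shown in this patch are assumptions.
std::vector<sp<Surface>> consumers = { previewSurface };
int streamId = -1;
device->createStream(consumers, /*hasDeferredConsumer*/ true,
        width, height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
        HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &streamId,
        camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/ true);

// Once the remaining surface exists, finish the deferred configuration:
device->setConsumerSurfaces(streamId, { lateSurface });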
@@ -175,10 +183,10 @@
     void             notifyStatus(bool idle); // updates from StatusTracker
 
     /**
-     * Set the deferred consumer surface to the output stream and finish the deferred
+     * Set the deferred consumer surfaces to the output stream and finish the deferred
      * consumer configuration.
      */
-    status_t setConsumerSurface(int streamId, sp<Surface> consumer) override;
+    status_t setConsumerSurfaces(int streamId, const std::vector<sp<Surface>>& consumers) override;
 
   private:
     static const size_t        kDumpLockAttempts  = 10;
@@ -254,9 +262,57 @@
         std::mutex mInflightLock;
 
         status_t pushInflightBufferLocked(int32_t frameNumber, int32_t streamId,
-                buffer_handle_t *buffer);
+                buffer_handle_t *buffer, int acquireFence);
         // Cache of buffer handles keyed off (frameNumber << 32 | streamId)
-        std::unordered_map<uint64_t, buffer_handle_t*> mInflightBufferMap;
+        // value is a pair of (buffer_handle_t*, acquire_fence FD)
+        std::unordered_map<uint64_t, std::pair<buffer_handle_t*, int>> mInflightBufferMap;
+
+        struct BufferHasher {
+            size_t operator()(const buffer_handle_t& buf) const {
+                if (buf == nullptr)
+                    return 0;
+
+                size_t result = 1;
+                result = 31 * result + buf->numFds;
+                result = 31 * result + buf->numInts;
+                int length = buf->numFds + buf->numInts;
+                for (int i = 0; i < length; i++) {
+                    result = 31 * result + buf->data[i];
+                }
+                return result;
+            }
+        };
+
+        struct BufferComparator {
+            bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
+                if (buf1->numFds == buf2->numFds && buf1->numInts == buf2->numInts) {
+                    int length = buf1->numFds + buf1->numInts;
+                    for (int i = 0; i < length; i++) {
+                        if (buf1->data[i] != buf2->data[i]) {
+                            return false;
+                        }
+                    }
+                    return true;
+                }
+                return false;
+            }
+        };
+
+        std::mutex mBufferIdMapLock; // protecting mBufferIdMaps and mNextBufferId
+        typedef std::unordered_map<const buffer_handle_t, uint64_t,
+                BufferHasher, BufferComparator> BufferIdMap;
+        // stream ID -> per stream buffer ID map
+        std::unordered_map<int, BufferIdMap> mBufferIdMaps;
+        uint64_t mNextBufferId = 1; // 0 means no buffer
+        static const uint64_t BUFFER_ID_NO_BUFFER = 0;
+
+        // method to extract buffer's unique ID
+        // TODO: we should switch to using gralloc mapper's getBackingStore API
+        //       once we run in binderized gralloc mode, but before that is ready,
+        //       we need to rely on the conventional buffer queue behavior where
+        //       buffer_handle_t's FD won't change.
+        // return pair of (newlySeenBuffer?, bufferId)
+        std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
     };
 
     std::unique_ptr<HalInterface> mInterface;
@@ -342,6 +398,7 @@
         camera3_stream_buffer_t             mInputBuffer;
         Vector<sp<camera3::Camera3OutputStreamInterface> >
                                             mOutputStreams;
+        SurfaceMap                          mOutputSurfaces;
         CaptureResultExtras                 mResultExtras;
         // Used to cancel AE precapture trigger for devices doesn't support
         // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
@@ -360,11 +417,18 @@
 
     status_t convertMetadataListToRequestListLocked(
             const List<const CameraMetadata> &metadataList,
+            const std::list<const SurfaceMap> &surfaceMaps,
             bool repeating,
             /*out*/
             RequestList *requestList);
 
-    status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+    void convertToRequestList(List<const CameraMetadata>& requests,
+            std::list<const SurfaceMap>& surfaceMaps,
+            const CameraMetadata& request);
+
+    status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+                                  const std::list<const SurfaceMap> &surfaceMaps,
+                                  bool repeating,
                                   int64_t *lastFrameNumber = NULL);
 
 
@@ -436,13 +500,15 @@
      * Do common work for setting up a streaming or single capture request.
      * On success, will transition to ACTIVE if in IDLE.
      */
-    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+                                          const SurfaceMap &surfaceMap);
 
     /**
      * Build a CaptureRequest request from the CameraDeviceBase request
      * settings.
      */
-    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+                                            const SurfaceMap &surfaceMap);
 
     /**
      * Take the currently-defined set of streams and configure the HAL to use
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..1a730d6 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -83,6 +83,14 @@
     return OK;
 }
 
+status_t Camera3DummyStream::notifyRequestedSurfaces(uint32_t frame_number,
+        const std::vector<size_t>& surface_ids) {
+    (void) frame_number;
+    (void) surface_ids;
+    // Do nothing
+    return OK;
+}
+
 status_t Camera3DummyStream::configureQueueLocked() {
     // Do nothing
     return OK;
@@ -103,13 +111,13 @@
     return false;
 }
 
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
     return false;
 }
 
-status_t Camera3DummyStream::setConsumer(sp<Surface> consumer) {
-    ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface %p!",
-            __FUNCTION__, mId, consumer.get());
+status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
+    ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
+            __FUNCTION__, mId);
     return INVALID_OPERATION;
 }
 }; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..b6ec99c 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -56,6 +56,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Return if this output stream is for video encoding.
      */
@@ -64,12 +67,12 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
-     * Set the consumer surface to the output stream.
+     * Set the consumer surfaces to the output stream.
      */
-    virtual status_t setConsumer(sp<Surface> consumer);
+    virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
 
   protected:
 
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index f781ded..1469b74 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -85,6 +85,9 @@
                         /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
     mBuffersInFlight.push_back(bufferItem);
 
+    mFrameCount++;
+    mLastTimestamp = bufferItem.mTimestamp;
+
     return OK;
 }
 
@@ -220,6 +223,7 @@
 
     mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
+    mLastTimestamp = 0;
 
     if (mConsumer.get() == 0) {
         sp<IGraphicBufferProducer> producer;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..f971116 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -124,6 +124,7 @@
                                          int format,
                                          android_dataspace dataSpace,
                                          camera3_stream_rotation_t rotation,
+                                         uint32_t consumerUsage, nsecs_t timestampOffset,
                                          int setId) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
@@ -132,7 +133,8 @@
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
-        mConsumerUsage(0) {
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(consumerUsage) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -237,6 +239,7 @@
     }
 
     mLastTimestamp = timestamp;
+    mFrameCount++;
 
     return OK;
 }
@@ -373,6 +376,24 @@
         return res;
     }
 
+    if ((res = configureConsumerQueueLocked()) != OK) {
+        return res;
+    }
+
+    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need to skip these cases, as the timeout would disable the non-blocking (async) mode.
+    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+    status_t res;
+
+    mTraceFirstBuffer = true;
+
     ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
 
     // Configure consumer-side ANativeWindow interface. The listener may be used
@@ -470,12 +491,7 @@
     if (res != OK) {
         ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                 __FUNCTION__, mTransform, strerror(-res), res);
-    }
-
-    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
-    // We need skip these cases as timeout will disable the non-blocking (async) mode.
-    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
-        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+        return res;
     }
 
     /**
@@ -568,14 +584,24 @@
 status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
 
     status_t res;
-    int32_t u = 0;
+
     if (mConsumer == nullptr) {
         // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
         *usage = mConsumerUsage;
         return OK;
     }
 
-    res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+    res = getEndpointUsageForSurface(usage, mConsumer);
+
+    return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+        const sp<Surface>& surface) const {
+    status_t res;
+    int32_t u = 0;
+
+    res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
 
     // If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +613,8 @@
     //     3. GRALLOC_USAGE_HW_COMPOSER
     //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
     if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
-            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
-            GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
         u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
     }
 
@@ -676,14 +702,28 @@
     return OK;
 }
 
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+status_t Camera3OutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& /*surface_ids*/) {
+    return OK;
+}
+
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
     Mutex::Autolock l(mLock);
+
+    if (surface_id != 0) {
+        ALOGE("%s: surface_id %zu for Camera3OutputStream should be 0!", __FUNCTION__, surface_id);
+    }
     return mConsumer == nullptr;
 }
 
-status_t Camera3OutputStream::setConsumer(sp<Surface> consumer) {
-    if (consumer == nullptr) {
-        ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+status_t Camera3OutputStream::setConsumers(const std::vector<sp<Surface>>& consumers) {
+    if (consumers.size() != 1) {
+        ALOGE("%s: it's illegal to set %zu consumer surfaces!",
+                  __FUNCTION__, consumers.size());
+        return INVALID_OPERATION;
+    }
+    if (consumers[0] == nullptr) {
+        ALOGE("%s: it's illegal to set null consumer surface!", __FUNCTION__);
         return INVALID_OPERATION;
     }
 
@@ -692,7 +732,7 @@
         return INVALID_OPERATION;
     }
 
-    mConsumer = consumer;
+    mConsumer = consumers[0];
     return OK;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..080c721 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -135,12 +135,12 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
-     * Set the consumer surface to the output stream.
+     * Set the consumer surfaces to the output stream.
      */
-    virtual status_t setConsumer(sp<Surface> consumer);
+    virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
 
     class BufferReleasedListener : public BnProducerListener {
         public:
@@ -158,6 +158,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Set the graphic buffer manager to get/return the stream buffers.
      *
@@ -169,6 +172,7 @@
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
             int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     /**
@@ -183,12 +187,19 @@
 
     virtual status_t disconnectLocked();
 
+    status_t getEndpointUsageForSurface(uint32_t *usage,
+            const sp<Surface>& surface) const;
+    status_t configureConsumerQueueLocked();
+
+    // The consumer surface that receives the camera HAL's output
     sp<Surface> mConsumer;
 
-  private:
+    uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
 
     static const nsecs_t       kDequeueBufferTimeout   = 1000000000; // 1 sec
 
+  private:
+
     int               mTransform;
 
     virtual status_t  setTransformLocked(int transform);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..11868e7 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,12 +43,12 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const = 0;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
 
     /**
-     * Set the consumer surface to the output stream.
+     * Set the consumer surfaces to the output stream.
      */
-    virtual status_t setConsumer(sp<Surface> consumer) = 0;
+    virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers) = 0;
 
     /**
      * Detach an unused buffer from the stream.
@@ -59,6 +59,20 @@
      *
      */
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+    /**
+     * Notify which surfaces are requested for a particular frame number.
+     *
+     * Multiple surfaces could share the same output stream, but a request may
+     * target only a subset of them. In this case, the
+     * Camera3OutputStreamInterface object needs to manage its output surfaces on
+     * a per-request basis.
+     *
+     * If there is only one surface for this output stream, calling this
+     * function is a no-op.
+     */
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids) = 0;
 };
 
 } // namespace camera3
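In practice the request thread issues this notification per frame before handing buffers to the HAL, as in the RequestThread change earlier in this patch. A minimal sketch of the call (frame number and surface indices are illustrative):

// Illustrative: frame 42 should reach only surfaces 0 and 2 of this stream;
// for a single-surface stream the call is a no-op per the contract above.
status_t res = outputStream->notifyRequestedSurfaces(/*frame_number*/ 42, {0, 2});
if (res != OK) {
    ALOGE("Cannot register output surfaces: %s (%d)", strerror(-res), res);
}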
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..0d6a96c
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+        const std::vector<sp<Surface>>& surfaces,
+        uint32_t width, uint32_t height, int format,
+        uint32_t consumerUsage, android_dataspace dataSpace,
+        camera3_stream_rotation_t rotation,
+        nsecs_t timestampOffset, int setId) :
+        Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+                            format, dataSpace, rotation, consumerUsage,
+                            timestampOffset, setId),
+        mSurfaces(surfaces) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+    disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+    status_t res = OK;
+
+    mStreamSplitter = new Camera3StreamSplitter();
+
+    uint32_t usage;
+    getEndpointUsage(&usage);
+
+    res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, mConsumer);
+    if (res != OK) {
+        ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& surface_ids) {
+    Mutex::Autolock l(mLock);
+    status_t res = OK;
+
+    if (mStreamSplitter != nullptr) {
+        res = mStreamSplitter->notifyRequestedSurfaces(surface_ids);
+    }
+
+    return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+    Mutex::Autolock l(mLock);
+    return (surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumers(const std::vector<sp<Surface>>& surfaces) {
+    if (surfaces.size() == 0) {
+        ALOGE("%s: it's illegal to set zero consumer surfaces!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    status_t ret = OK;
+    for (auto& surface : surfaces) {
+        if (surface == nullptr) {
+            ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        mSurfaces.push_back(surface);
+
+        // Only call addOutput if the splitter has been connected.
+        if (mStreamSplitter != nullptr) {
+            ret = mStreamSplitter->addOutput(surface, camera3_stream::max_buffers);
+            if (ret != OK) {
+                ALOGE("%s: addOutput failed with error code %d", __FUNCTION__, ret);
+                return ret;
+
+            }
+        }
+    }
+    return ret;
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+    status_t res;
+
+    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+        return res;
+    }
+
+    res = connectStreamSplitterLocked();
+    if (res != OK) {
+        ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    res = configureConsumerQueueLocked();
+    if (res != OK) {
+        ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+    status_t res;
+    res = Camera3OutputStream::disconnectLocked();
+
+    if (mStreamSplitter != nullptr) {
+        mStreamSplitter->disconnect();
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+    status_t res = OK;
+    uint32_t u = 0;
+
+    if (mConsumer == nullptr) {
+        // Called before shared buffer queue is constructed.
+        *usage = getPresetConsumerUsage();
+
+        for (auto surface : mSurfaces) {
+            if (surface != nullptr) {
+                res = getEndpointUsageForSurface(&u, surface);
+                *usage |= u;
+            }
+        }
+    } else {
+        // Called after shared buffer queue is constructed.
+        res = getEndpointUsageForSurface(&u, mConsumer);
+        *usage |= u;
+    }
+
+    return res;
+}
+
+} // namespace camera3
+
+} // namespace android
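Worth noting from getEndpointUsage() above: before the splitter's internal queue exists, the stream advertises the union of every attached surface's consumer usage plus any preset usage. A small illustration under assumed flags:

// Assumed flags for illustration only.
uint32_t usageA   = GRALLOC_USAGE_HW_TEXTURE;        // e.g. a GL preview surface
uint32_t usageB   = GRALLOC_USAGE_HW_VIDEO_ENCODER;  // e.g. a recording surface
uint32_t combined = usageA | usageB;                 // what the HAL allocates against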
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..cc96076
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+        public Camera3OutputStream {
+public:
+    /**
+     * Set up a stream for formats that have 2 dimensions, with multiple
+     * surfaces. A valid stream set id needs to be set to support buffer
+     * sharing between multiple streams.
+     */
+    Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+            uint32_t width, uint32_t height, int format,
+            uint32_t consumerUsage, android_dataspace dataSpace,
+            camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+    virtual ~Camera3SharedOutputStream();
+
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+    virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
+
+private:
+    // Surfaces passed in constructor from app
+    std::vector<sp<Surface> > mSurfaces;
+
+    /**
+     * The Camera3StreamSplitter object this stream uses for stream
+     * sharing.
+     */
+    sp<Camera3StreamSplitter> mStreamSplitter;
+
+    /**
+     * Initialize stream splitter.
+     */
+    status_t connectStreamSplitterLocked();
+
+    virtual status_t configureQueueLocked();
+
+    virtual status_t disconnectLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..c9f43aa
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+                                           uint32_t consumerUsage, size_t hal_max_buffers,
+                                           sp<Surface>& consumer) {
+    if (consumer != nullptr) {
+        ALOGE("%s: output Surface is not NULL", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+    status_t res = OK;
+
+    if (mOutputs.size() > 0 || mConsumer != nullptr) {
+        ALOGE("%s: StreamSplitter already connected", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Add output surfaces. This has to happen before creating the internal buffer
+    // queue, in order to determine the max consumer-side buffer count.
+    for (size_t i = 0; i < surfaces.size(); i++) {
+        if (surfaces[i] == nullptr) {
+            ALOGE("%s: Fatal: surface is NULL", __FUNCTION__);
+            return BAD_VALUE;
+        }
+        res = addOutputLocked(surfaces[i], hal_max_buffers, OutputType::NonDeferred);
+        if (res != OK) {
+            ALOGE("%s: Failed to add output surface: %s(%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Create buffer queue for input
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+    mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+                                                 mMaxConsumerBuffers);
+    if (mBufferItemConsumer == nullptr) {
+        return NO_MEMORY;
+    }
+    mConsumer->setConsumerName(getUniqueConsumerName());
+
+    mSurface = new Surface(mProducer);
+    if (mSurface == nullptr) {
+        return NO_MEMORY;
+    }
+    consumer = mSurface;
+
+    res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+    return res;
+}
+
+void Camera3StreamSplitter::disconnect() {
+    Mutex::Autolock lock(mMutex);
+
+    for (auto& output : mOutputs) {
+        output->disconnect(NATIVE_WINDOW_API_CAMERA);
+    }
+    mOutputs.clear();
+
+    if (mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+        mConsumer.clear();
+    }
+
+    if (mBuffers.size() > 0) {
+        ALOGI("%zu buffers still being tracked", mBuffers.size());
+    }
+}
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+    disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(
+        const sp<Surface>& outputQueue, size_t hal_max_buffers) {
+    Mutex::Autolock lock(mMutex);
+    return addOutputLocked(outputQueue, hal_max_buffers, OutputType::Deferred);
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(
+        const sp<Surface>& outputQueue, size_t hal_max_buffers,
+        OutputType outputType) {
+    if (outputQueue == nullptr) {
+        ALOGE("addOutput: outputQueue must not be NULL");
+        return BAD_VALUE;
+    }
+    if (hal_max_buffers < 1) {
+        ALOGE("%s: Camera HAL requested max_buffer count: %zu, requires at least 1",
+                __FUNCTION__, hal_max_buffers);
+        return BAD_VALUE;
+    }
+
+    sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+    // Connect to the buffer producer
+    IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+    sp<OutputListener> listener(new OutputListener(this, gbp));
+    IInterface::asBinder(gbp)->linkToDeath(listener);
+    status_t status = gbp->connect(listener, NATIVE_WINDOW_API_CAMERA,
+            /* producerControlledByApp */ true, &queueBufferOutput);
+    if (status != NO_ERROR) {
+        ALOGE("addOutput: failed to connect (%d)", status);
+        return status;
+    }
+
+    // Query consumer side buffer count, and update overall buffer count
+    int maxConsumerBuffers = 0;
+    status = static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+    if (status != OK) {
+        ALOGE("%s: Unable to query consumer undequeued buffer count"
+              " for surface", __FUNCTION__);
+        return status;
+    }
+
+    if (maxConsumerBuffers > mMaxConsumerBuffers) {
+        if (outputType == OutputType::Deferred) {
+            ALOGE("%s: Fatal: Deferred surface has higher consumer buffer count"
+                  " %d than what's already configured %d", __FUNCTION__,
+                  maxConsumerBuffers, mMaxConsumerBuffers);
+            return BAD_VALUE;
+        }
+        mMaxConsumerBuffers = maxConsumerBuffers;
+    }
+
+    ALOGV("%s: Consumer wants %d buffers, HAL wants %zu", __FUNCTION__,
+            maxConsumerBuffers, hal_max_buffers);
+    size_t totalBufferCount = maxConsumerBuffers + hal_max_buffers;
+    status = native_window_set_buffer_count(outputQueue.get(),
+            totalBufferCount);
+    if (status != OK) {
+        ALOGE("%s: Unable to set buffer count for surface %p",
+                __FUNCTION__, outputQueue.get());
+        return status;
+    }
+
+    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need to skip these cases, as the timeout would disable the non-blocking (async) mode.
+    int32_t usage = 0;
+    static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+    if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+        outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    status = gbp->allowAllocation(false);
+    if (status != OK) {
+        ALOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+        return status;
+    }
+
+    // Add new entry into mOutputs
+    mOutputs.push_back(gbp);
+    return NO_ERROR;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+    static volatile int32_t counter = 0;
+    return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyRequestedSurfaces(
+        const std::vector<size_t>& surfaces) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    mRequestedSurfaces.push_back(surfaces);
+    return OK;
+}
+
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /* item */) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    // The current policy is that if any one consumer is consuming buffers too
+    // slowly, the splitter will stall the rest of the outputs by not acquiring
+    // any more buffers from the input. This will cause back pressure on the
+    // input queue, slowing down its producer.
+
+    // If there are too many outstanding buffers, we block until a buffer is
+    // released back to the input in onBufferReleased
+    while (mOutstandingBuffers >= mMaxConsumerBuffers) {
+        mReleaseCondition.wait(mMutex);
+
+        // If the splitter is abandoned while we are waiting, the release
+        // condition variable will be broadcast, and we should just return
+        // without attempting to do anything more (since the input queue will
+        // also be abandoned).
+        if (mIsAbandoned) {
+            return;
+        }
+    }
+    // If the splitter is abandoned without reaching mMaxConsumerBuffers, just
+    // return without attempting to do anything more.
+    if (mIsAbandoned) {
+        return;
+    }
+
+    ++mOutstandingBuffers;
+
+    // Acquire and detach the buffer from the input
+    BufferItem bufferItem;
+    status_t status = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "acquiring buffer from input failed (%d)", status);
+
+    ALOGV("acquired buffer %#" PRIx64 " from input",
+            bufferItem.mGraphicBuffer->getId());
+
+    status = mConsumer->detachBuffer(bufferItem.mSlot);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "detaching buffer from input failed (%d)", status);
+
+    IGraphicBufferProducer::QueueBufferInput queueInput(
+            bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+            bufferItem.mDataSpace, bufferItem.mCrop,
+            static_cast<int32_t>(bufferItem.mScalingMode),
+            bufferItem.mTransform, bufferItem.mFence);
+
+    // Attach and queue the buffer to each of the outputs
+    std::vector<std::vector<size_t> >::iterator surfaces = mRequestedSurfaces.begin();
+    if (surfaces != mRequestedSurfaces.end()) {
+
+        LOG_ALWAYS_FATAL_IF(surfaces->size() == 0,
+                "requested surface ids shouldn't be empty");
+
+        // Initialize our reference count for this buffer
+        mBuffers[bufferItem.mGraphicBuffer->getId()] =
+                std::unique_ptr<BufferTracker>(
+                new BufferTracker(bufferItem.mGraphicBuffer, surfaces->size()));
+
+        for (auto id : *surfaces) {
+
+            LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+                    "requested surface id exceeding max registered ids");
+
+            int slot = BufferItem::INVALID_BUFFER_SLOT;
+            status = mOutputs[id]->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == WOULD_BLOCK) {
+                // If the output is async, attachBuffer may return WOULD_BLOCK
+                // indicating number of dequeued buffers has reached limit. In
+                // this case, simply decrement the reference count, and move on
+                // to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == TIMED_OUT) {
+                // If attachBuffer times out due to the value set by
+                // setDequeueTimeout, simply decrement the reference count, and
+                // move on to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "attaching buffer to output failed (%d)", status);
+            }
+
+            IGraphicBufferProducer::QueueBufferOutput queueOutput;
+            status = mOutputs[id]->queueBuffer(slot, queueInput, &queueOutput);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "queueing buffer to output failed (%d)", status);
+            }
+
+            // If the queued buffer replaces a pending buffer in the async
+            // queue, no onBufferReleased is called by the buffer queue.
+            // Proactively trigger the callback to avoid buffer loss.
+            if (queueOutput.bufferReplaced) {
+                onBufferReleasedByOutputLocked(mOutputs[id]);
+            }
+
+            ALOGV("queued buffer %#" PRIx64 " to output %p",
+                    bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
+        }
+
+        mRequestedSurfaces.erase(surfaces);
+    }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+        const sp<IGraphicBufferProducer>& from) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    onBufferReleasedByOutputLocked(from);
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutputLocked(
+        const sp<IGraphicBufferProducer>& from) {
+
+    sp<GraphicBuffer> buffer;
+    sp<Fence> fence;
+    status_t status = from->detachNextBuffer(&buffer, &fence);
+    if (status == NO_INIT) {
+        // If we just discovered that this output has been abandoned, note that,
+        // but we can't do anything else, since buffer is invalid
+        onAbandonedLocked();
+        return;
+    } else {
+        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                "detaching buffer from output failed (%d)", status);
+    }
+
+    ALOGV("detached buffer %#" PRIx64 " from output %p",
+          buffer->getId(), from.get());
+
+    BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+
+    // Merge the release fence of the incoming buffer so that the fence we send
+    // back to the input includes all of the outputs' fences
+    tracker.mergeFence(fence);
+
+    // Check to see if this is the last outstanding reference to this buffer
+    size_t referenceCount = tracker.decrementReferenceCountLocked();
+    ALOGV("buffer %#" PRIx64 " reference count %zu", buffer->getId(),
+            referenceCount);
+    if (referenceCount > 0) {
+        return;
+    }
+
+    // If we've been abandoned, we can't return the buffer to the input, so just
+    // stop tracking it and move on
+    if (mIsAbandoned) {
+        mBuffers.erase(buffer->getId());
+        return;
+    }
+
+    // Attach and release the buffer back to the input
+    int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+    status = mConsumer->attachBuffer(&consumerSlot, tracker.getBuffer());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "attaching buffer to input failed (%d)", status);
+
+    status = mConsumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker.getMergedFence());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "releasing buffer to input failed (%d)", status);
+
+    ALOGV("released buffer %#" PRIx64 " to input", buffer->getId());
+
+    // We no longer need to track the buffer once it has been returned to the
+    // input
+    mBuffers.erase(buffer->getId());
+
+    // Notify any waiting onFrameAvailable calls
+    --mOutstandingBuffers;
+    mReleaseCondition.signal();
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+    ALOGE("one of my outputs has abandoned me");
+    if (!mIsAbandoned && mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+    }
+    mIsAbandoned = true;
+    mReleaseCondition.broadcast();
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+        wp<Camera3StreamSplitter> splitter,
+        wp<IGraphicBufferProducer> output)
+      : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    sp<IGraphicBufferProducer> output = mOutput.promote();
+    if (splitter != nullptr && output != nullptr) {
+        splitter->onBufferReleasedByOutput(output);
+    }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    if (splitter != nullptr) {
+        Mutex::Autolock lock(splitter->mMutex);
+        splitter->onAbandonedLocked();
+    }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+        const sp<GraphicBuffer>& buffer, size_t referenceCount)
+      : mBuffer(buffer), mMergedFence(Fence::NO_FENCE),
+        mReferenceCount(referenceCount) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+    mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+    if (mReferenceCount > 0)
+        --mReferenceCount;
+    return mReferenceCount;
+}
+
+} // namespace android
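Taken together, the splitter's public surface amounts to a connect / addOutput / notifyRequestedSurfaces / disconnect lifecycle driven by Camera3SharedOutputStream. A hedged sketch of that flow (surface names and counts are illustrative):

// Hypothetical lifecycle sketch based on the functions defined above.
sp<Camera3StreamSplitter> splitter = new Camera3StreamSplitter();
sp<Surface> halConsumer;   // filled in by connect(); becomes the stream's mConsumer

status_t res = splitter->connect({surfaceA, surfaceB}, consumerUsage,
        /*hal_max_buffers*/ 4, halConsumer);

// A deferred surface can still be attached later:
res = splitter->addOutput(surfaceC, /*hal_max_buffers*/ 4);

// Per capture request, choose which outputs the next input buffer fans out to:
res = splitter->notifyRequestedSurfaces({0, 2});   // surfaceA and surfaceC only

splitter->disconnect();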
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..92371ff
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+    // Constructor
+    Camera3StreamSplitter() = default;
+
+    // Connect to the stream splitter by creating buffer queue and connecting it
+    // with output surfaces.
+    status_t connect(const std::vector<sp<Surface> >& surfaces,
+            uint32_t consumerUsage, size_t hal_max_buffers,
+            sp<Surface>& consumer);
+
+    // addOutput adds an output BufferQueue to the splitter. The splitter
+    // connects to outputQueue as a CPU producer, and any buffers queued
+    // to the input will be queued to each output. It is assumed that all of the
+    // outputs are added before any buffers are queued on the input. If any
+    // output is abandoned by its consumer, the splitter will abandon its input
+    // queue (see onAbandoned).
+    //
+    // A return value other than NO_ERROR means that an error has occurred and
+    // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+    // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+    // of other error codes.
+    status_t addOutput(const sp<Surface>& outputQueue, size_t hal_max_buffers);
+
+    // Request surfaces for a particular frame number. The requested surfaces
+    // are stored in a FIFO queue, and when a buffer becomes available from the
+    // input queue, the registered surfaces are used to decide which outputs the
+    // buffer is sent to.
+    status_t notifyRequestedSurfaces(const std::vector<size_t>& surfaces);
+
+    // Disconnect the buffer queue from output surfaces.
+    void disconnect();
+
+private:
+    // From IConsumerListener
+    //
+    // During this callback, we store some tracking information, detach the
+    // buffer from the input, and attach it to each of the outputs. This call
+    // can block if there are too many outstanding buffers. If it blocks, it
+    // will resume when onBufferReleasedByOutput releases a buffer back to the
+    // input.
+    void onFrameAvailable(const BufferItem& item) override;
+
+    // From IConsumerListener
+    // We don't care about released buffers because we detach each buffer as
+    // soon as we acquire it. See the comment for onBufferReleased below for
+    // some clarifying notes about the name.
+    void onBuffersReleased() override {}
+
+    // From IConsumerListener
+    // We don't care about sideband streams, since we won't be splitting them
+    void onSidebandStreamChanged() override {}
+
+    // This is the implementation of the onBufferReleased callback from
+    // IProducerListener. It gets called from an OutputListener (see below), and
+    // 'from' indicates which producer interface the callback was received from.
+    //
+    // During this callback, we detach the buffer from the output queue that
+    // generated the callback, update our state tracking to see if this is the
+    // last output releasing the buffer, and if so, release it to the input.
+    // If we release the buffer to the input, we allow a blocked
+    // onFrameAvailable call to proceed.
+    void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+    // This is the body of onBufferReleasedByOutput, called with mMutex already held.
+    // It could either be called from onBufferReleasedByOutput or from
+    // onFrameAvailable when a buffer in the async buffer queue is overwritten.
+    void onBufferReleasedByOutputLocked(const sp<IGraphicBufferProducer>& from);
+
+    // When this is called, the splitter disconnects from (i.e., abandons) its
+    // input queue and signals any waiting onFrameAvailable calls to wake up.
+    // It still processes callbacks from other outputs, but only detaches their
+    // buffers so they can continue operating until they run out of buffers to
+    // acquire. This must be called with mMutex locked.
+    void onAbandonedLocked();
+
+    // This is a thin wrapper class that lets us determine which BufferQueue
+    // the IProducerListener::onBufferReleased callback is associated with. We
+    // create one of these per output BufferQueue, and then pass the producer
+    // into onBufferReleasedByOutput above.
+    class OutputListener : public BnProducerListener,
+                           public IBinder::DeathRecipient {
+    public:
+        OutputListener(wp<Camera3StreamSplitter> splitter,
+                wp<IGraphicBufferProducer> output);
+        virtual ~OutputListener() = default;
+
+        // From IProducerListener
+        void onBufferReleased() override;
+
+        // From IBinder::DeathRecipient
+        void binderDied(const wp<IBinder>& who) override;
+
+    private:
+        wp<Camera3StreamSplitter> mSplitter;
+        wp<IGraphicBufferProducer> mOutput;
+    };
+
+    class BufferTracker {
+    public:
+        BufferTracker(const sp<GraphicBuffer>& buffer, size_t referenceCount);
+        ~BufferTracker() = default;
+
+        const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+        const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+        void mergeFence(const sp<Fence>& with);
+
+        // Returns the new value
+        // Only called while mMutex is held
+        size_t decrementReferenceCountLocked();
+
+    private:
+
+        // Disallow copying
+        BufferTracker(const BufferTracker& other);
+        BufferTracker& operator=(const BufferTracker& other);
+
+        sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+        sp<Fence> mMergedFence;
+        size_t mReferenceCount;
+    };
+
+    // A deferred output is an output added to the splitter after the
+    // connect() call, whereas a non-deferred output is added within the
+    // connect() call.
+    enum class OutputType { NonDeferred, Deferred };
+
+    // Must be accessed through RefBase
+    virtual ~Camera3StreamSplitter();
+
+    status_t addOutputLocked(const sp<Surface>& outputQueue,
+                             size_t hal_max_buffers, OutputType outputType);
+
+    // Get unique name for the buffer queue consumer
+    static String8 getUniqueConsumerName();
+
+    // Max consumer side buffers for deferred surface. This will be used as a
+    // lower bound for overall consumer side max buffers.
+    static const int MAX_BUFFERS_DEFERRED_OUTPUT = 2;
+    int mMaxConsumerBuffers = MAX_BUFFERS_DEFERRED_OUTPUT;
+
+    static const nsecs_t kDequeueBufferTimeout   = s2ns(1); // 1 sec
+
+    // mIsAbandoned is set to true when an output dies. Once the Camera3StreamSplitter
+    // has been abandoned, it will continue to detach buffers from other
+    // outputs, but it will disconnect from the input and not attempt to
+    // communicate with it further.
+    bool mIsAbandoned = false;
+
+    Mutex mMutex;
+    Condition mReleaseCondition;
+    int mOutstandingBuffers = 0;
+
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
+    sp<BufferItemConsumer> mBufferItemConsumer;
+    sp<Surface> mSurface;
+
+    std::vector<sp<IGraphicBufferProducer> > mOutputs;
+    // Tracks which outputs each input buffer should be attached and
+    // queued to.
+    std::vector<std::vector<size_t> > mRequestedSurfaces;
+
+    // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+    // objects (which are mostly for counting how many outputs have released the
+    // buffer, but also contain merged release fences).
+    std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+};
+
+} // namespace android
+
+#endif
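For context, the splitter's buffer lifetime management above boils down to a per-buffer reference count: each input buffer is attached to N outputs, and only when the Nth output releases it is the buffer detached and returned to the input queue. A minimal, self-contained sketch of that idea (hypothetical names, standard C++ only; the real BufferTracker additionally wraps sp<GraphicBuffer> and merges release fences):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    // One tracker per input buffer, keyed by the buffer's unique ID.
    struct Tracker {
        size_t referenceCount;   // how many outputs still hold the buffer
    };

    class SplitterSketch {
    public:
        // Called when a buffer is dispatched to 'outputs' consumers.
        void onBufferQueued(uint64_t bufferId, size_t outputs) {
            mBuffers[bufferId] = std::unique_ptr<Tracker>(new Tracker{outputs});
        }

        // Called from each output's onBufferReleased; returns true when the
        // last output has released it and the buffer may go back to the input.
        bool onBufferReleasedByOutput(uint64_t bufferId) {
            auto it = mBuffers.find(bufferId);
            if (it == mBuffers.end()) return false;   // unknown buffer
            if (--it->second->referenceCount == 0) {
                mBuffers.erase(it);                   // last reference gone
                return true;                          // release to the input
            }
            return false;
        }

    private:
        std::unordered_map<uint64_t, std::unique_ptr<Tracker>> mBuffers;
    };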
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 374dc5e..0198690 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -74,7 +74,7 @@
         return BAD_VALUE;
     }
 
-    dprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+    dprintf(fd, "== Camera error traces (%zu): ==\n", pcsList.size());
 
     if (pcsList.empty()) {
         dprintf(fd, "  No camera traces collected.\n");
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index 76a5c1c..ef49df4 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -5,28 +5,43 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	main_mediaanalytics.cpp
+    main_mediametrics.cpp          \
+    MediaAnalyticsService.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libcutils \
-	liblog \
-	libmedia \
-	libmediaanalyticsservice \
-	libutils \
-	libbinder \
-	libicuuc
+    libcutils                   \
+    liblog                      \
+    libmedia                    \
+    libutils                    \
+    libbinder                   \
+    libdl                       \
+    libgui                      \
+    libmedia                    \
+    libmediautils               \
+    libstagefright_foundation   \
+    libutils
 
 LOCAL_STATIC_LIBRARIES := \
-        libicuandroid_utils \
         libregistermsext
 
-LOCAL_C_INCLUDES := \
-    frameworks/av/media/libmediaanalyticsservice
+LOCAL_C_INCLUDES :=                                                 \
+    $(TOP)/frameworks/av/media/libstagefright/include               \
+    $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
+    $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
+    $(TOP)/frameworks/av/media/libstagefright/webm                  \
+    $(TOP)/frameworks/av/include/media                              \
+    $(TOP)/frameworks/av/include/camera                             \
+    $(TOP)/frameworks/native/include/media/openmax                  \
+    $(TOP)/frameworks/native/include/media/hardware                 \
+    $(TOP)/external/tremolo/Tremolo                                 \
+    libcore/include
 
-LOCAL_MODULE:= mediaanalytics
 
-LOCAL_INIT_RC := mediaanalytics.rc
+LOCAL_MODULE:= mediametrics
 
-LOCAL_CFLAGS := -Werror -Wall
+LOCAL_INIT_RC := mediametrics.rc
+
+LOCAL_CFLAGS := -Werror -Wall -Wno-error=deprecated-declarations
+LOCAL_CLANG := true
 
 include $(BUILD_EXECUTABLE)
diff --git a/media/libmediaanalyticsservice/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
similarity index 84%
rename from media/libmediaanalyticsservice/MediaAnalyticsService.cpp
rename to services/mediaanalytics/MediaAnalyticsService.cpp
index 1d0246d..35c1f5b 100644
--- a/media/libmediaanalyticsservice/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -85,7 +85,7 @@
 
 void MediaAnalyticsService::instantiate() {
     defaultServiceManager()->addService(
-            String16("media.analytics"), new MediaAnalyticsService());
+            String16("media.metrics"), new MediaAnalyticsService());
 }
 
 // XXX: add dynamic controls for mMaxRecords
@@ -125,18 +125,46 @@
 
     MediaAnalyticsItem::SessionID_t id = MediaAnalyticsItem::SessionIDInvalid;
 
-    // we control these, not using whatever the user might have sent
+    // we control these, generally not trusting user input
     nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
     item->setTimestamp(now);
     int pid = IPCThreadState::self()->getCallingPid();
-    item->setPid(pid);
     int uid = IPCThreadState::self()->getCallingUid();
-    item->setUid(uid);
+
+    int uid_given = item->getUid();
+    int pid_given = item->getPid();
+
+    // We do make exceptions for particular client uids that we know
+    // we trust.
+    //
+    bool isTrusted = false;
+
+    switch (uid)  {
+        case AID_MEDIA:
+        case AID_MEDIA_CODEC:
+        case AID_MEDIA_EX:
+        case AID_MEDIA_DRM:
+            // trusted source, only override default values
+            isTrusted = true;
+            if (uid_given == (-1)) {
+                item->setUid(uid);
+            }
+            if (pid_given == (-1)) {
+                item->setPid(pid);
+            }
+            break;
+        default:
+            isTrusted = false;
+            item->setPid(pid);
+            item->setUid(uid);
+            break;
+    }
+
 
     mItemsSubmitted++;
 
     // validate the record; we discard if we don't like it
-    if (contentValid(item) == false) {
+    if (contentValid(item, isTrusted) == false) {
         delete item;
         return MediaAnalyticsItem::SessionIDInvalid;
     }
@@ -252,6 +280,7 @@
     nsecs_t ts_since = 0;
     String16 clearOption("-clear");
     String16 sinceOption("-since");
+    String16 helpOption("-help");
     int n = args.size();
     for (int i = 0; i < n; i++) {
         String8 myarg(args[i]);
@@ -270,19 +299,29 @@
             } else {
                 ts_since = 0;
             }
+            // command line is milliseconds; internal units are nanoseconds
+            ts_since *= 1000*1000;
+        } else if (args[i] == helpOption) {
+            result.append("Recognized parameters:\n");
+            result.append("-help        this help message\n");
+            result.append("-clear       clears out saved records\n");
+            result.append("-since XXX   include records since XXX\n");
+            result.append("             (XXX is milliseconds since the UNIX epoch)\n");
+            write(fd, result.string(), result.size());
+            return NO_ERROR;
         }
     }
 
     Mutex::Autolock _l(mLock);
 
-    snprintf(buffer, SIZE, "Dump of the mediaanalytics process:\n");
+    snprintf(buffer, SIZE, "Dump of the mediametrics process:\n");
     result.append(buffer);
 
     int enabled = MediaAnalyticsItem::isEnabled();
     if (enabled) {
-        snprintf(buffer, SIZE, "Analytics gathering: enabled\n");
+        snprintf(buffer, SIZE, "Metrics gathering: enabled\n");
     } else {
-        snprintf(buffer, SIZE, "Analytics gathering: DISABLED via property\n");
+        snprintf(buffer, SIZE, "Metrics gathering: DISABLED via property\n");
     }
     result.append(buffer);
 
@@ -300,11 +339,11 @@
     }
 
     // show the recently recorded records
-    snprintf(buffer, sizeof(buffer), "\nFinalized Analytics (oldest first):\n");
+    snprintf(buffer, sizeof(buffer), "\nFinalized Metrics (oldest first):\n");
     result.append(buffer);
     result.append(this->dumpQueue(mFinalized, ts_since));
 
-    snprintf(buffer, sizeof(buffer), "\nIn-Progress Analytics (newest first):\n");
+    snprintf(buffer, sizeof(buffer), "\nIn-Progress Metrics (newest first):\n");
     result.append(buffer);
     result.append(this->dumpQueue(mOpen, ts_since));
 
@@ -336,8 +375,6 @@
 }
 
 String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since) {
-    const size_t SIZE = 512;
-    char buffer[SIZE];
     String8 result;
     int slot = 0;
 
@@ -351,12 +388,7 @@
                 continue;
             }
             AString entry = (*it)->toString();
-            snprintf(buffer, sizeof(buffer), "%4d: %s",
-                        slot, entry.c_str());
-            result.append(buffer);
-            buffer[0] = '\n';
-            buffer[1] = '\0';
-            result.append(buffer);
+            result.appendFormat("%5d: %s\n", slot, entry.c_str());
             slot++;
         }
     }
@@ -402,9 +434,7 @@
                     oitem->getTimestamp());
             }
             l->erase(l->begin());
-        ALOGD("drop record at %s:%d", __FILE__, __LINE__);
             delete oitem;
-        ALOGD("[done] drop record at %s:%d", __FILE__, __LINE__);
             mItemsDiscarded++;
         }
     }
@@ -510,10 +540,34 @@
     }
 }
 
-// are the contents good
-bool MediaAnalyticsService::contentValid(MediaAnalyticsItem *) {
+static AString allowedKeys[] =
+{
+    "codec",
+    "extractor"
+};
 
-    // certain keys require certain uids
+static const int nAllowedKeys = sizeof(allowedKeys) / sizeof(allowedKeys[0]);
+
+// are the contents good
+bool MediaAnalyticsService::contentValid(MediaAnalyticsItem *item, bool isTrusted) {
+
+    // untrusted uids can only send us a limited set of keys
+    if (isTrusted == false) {
+        // restrict to a specific set of keys
+        AString key = item->getKey();
+
+        size_t i;
+        for(i = 0; i < nAllowedKeys; i++) {
+            if (key == allowedKeys[i]) {
+                break;
+            }
+        }
+        if (i == nAllowedKeys) {
+            ALOGD("Ignoring (key): %s", item->toString().c_str());
+            return false;
+        }
+    }
+
     // internal consistency
 
     return true;
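The submit path above combines two checks: trusted uids (AID_MEDIA and friends) may supply their own pid/uid and any key, while untrusted callers are forced to their real pid/uid and restricted to a small key whitelist enforced by contentValid(). A minimal sketch of that whitelist check in plain C++ (hypothetical helper name, std::string in place of AString):

    #include <string>

    static const std::string kAllowedKeys[] = { "codec", "extractor" };
    static const size_t kNumAllowedKeys = sizeof(kAllowedKeys) / sizeof(kAllowedKeys[0]);

    // Returns true if an untrusted client may submit a record with this key.
    static bool isAllowedKeyForUntrusted(const std::string& key) {
        for (size_t i = 0; i < kNumAllowedKeys; i++) {
            if (key == kAllowedKeys[i]) {
                return true;
            }
        }
        return false;   // anything else is logged and dropped by the service
    }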
diff --git a/media/libmediaanalyticsservice/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
similarity index 97%
rename from media/libmediaanalyticsservice/MediaAnalyticsService.h
rename to services/mediaanalytics/MediaAnalyticsService.h
index 3e2298f..d2b0f09 100644
--- a/media/libmediaanalyticsservice/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -69,7 +69,7 @@
     int32_t mMaxRecords;
 
     // input validation after arrival from client
-    bool contentValid(MediaAnalyticsItem *);
+    bool contentValid(MediaAnalyticsItem *item, bool isTrusted);
     bool rateLimited(MediaAnalyticsItem *);
 
     // the ones that are still open
diff --git a/services/mediaanalytics/main_mediaanalytics.cpp b/services/mediaanalytics/main_mediametrics.cpp
similarity index 87%
rename from services/mediaanalytics/main_mediaanalytics.cpp
rename to services/mediaanalytics/main_mediametrics.cpp
index 672d13d..8020a03 100644
--- a/services/mediaanalytics/main_mediaanalytics.cpp
+++ b/services/mediaanalytics/main_mediametrics.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "mediaanalytics"
+#define LOG_TAG "mediametrics"
 //#define LOG_NDEBUG 0
 
 #include <binder/IPCThreadState.h>
@@ -24,7 +24,6 @@
 //#include "RegisterExtensions.h"
 
 // from LOCAL_C_INCLUDES
-#include "IcuUtils.h"
 #include "MediaAnalyticsService.h"
 
 using namespace android;
@@ -34,16 +33,16 @@
     signal(SIGPIPE, SIG_IGN);
 
     // to match the service name
-    // we're replacing "/system/bin/mediaanalytics" with "media.analytics"
+    // we're replacing "/system/bin/mediametrics" with "media.metrics"
     // we add a ".", but discard the path components: we finish with a shorter string
-    strcpy(argv[0], "media.analytics");
+    strcpy(argv[0], "media.metrics");
 
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm(defaultServiceManager());
     ALOGI("ServiceManager: %p", sm.get());
 
-    InitializeIcuOrDie();
     MediaAnalyticsService::instantiate();
+
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
 }
diff --git a/services/mediaanalytics/mediaanalytics.rc b/services/mediaanalytics/mediametrics.rc
similarity index 69%
rename from services/mediaanalytics/mediaanalytics.rc
rename to services/mediaanalytics/mediametrics.rc
index 0af69f5..3829f8c 100644
--- a/services/mediaanalytics/mediaanalytics.rc
+++ b/services/mediaanalytics/mediametrics.rc
@@ -1,4 +1,4 @@
-service mediaanalytics /system/bin/mediaanalytics
+service mediametrics /system/bin/mediametrics
     class main
     user media
     ioprio rt 4
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index a5f0751..4cbf737 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -17,7 +17,8 @@
 LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
 LOCAL_SRC_FILES := main_codecservice.cpp minijail/minijail.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
-	liblog libminijail
+    liblog libminijail libcutils \
+    android.hardware.media.omx@1.0
 LOCAL_C_INCLUDES := \
     $(TOP)/frameworks/av/media/libstagefright \
     $(TOP)/frameworks/native/include/media/openmax
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index a2868c1..f6cde85 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -25,11 +25,14 @@
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
+#include <cutils/properties.h>
 
 // from LOCAL_C_INCLUDES
 #include "MediaCodecService.h"
 #include "minijail/minijail.h"
 
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
 using namespace android;
 
 int main(int argc __unused, char** argv)
@@ -42,6 +45,21 @@
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm = defaultServiceManager();
     MediaCodecService::instantiate();
+
+    // Treble
+    bool useTrebleOmx = bool(property_get_bool("debug.treble_omx", 0));
+    if (useTrebleOmx) {
+        using namespace ::android::hardware::media::omx::V1_0;
+        sp<IOmx> omx = IOmx::getService(true);
+        if (omx == nullptr) {
+            ALOGE("Cannot create a Treble IOmx service.");
+        } else if (omx->registerAsService("default") != OK) {
+            ALOGE("Cannot register a Treble IOmx service.");
+        } else {
+            ALOGV("Treble IOmx service created.");
+        }
+    }
+
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
 }
diff --git a/services/mediacodec/minijail/minijail.cpp b/services/mediacodec/minijail/minijail.cpp
index 7926380..463f161 100644
--- a/services/mediacodec/minijail/minijail.cpp
+++ b/services/mediacodec/minijail/minijail.cpp
@@ -19,7 +19,8 @@
 
 #include <unistd.h>
 
-#include <android/log.h>
+#include <log/log.h>
+
 #include <libminijail.h>
 
 #include "minijail.h"
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
index 0afaa15..b7603bc 100644
--- a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
+++ b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
@@ -12,6 +12,14 @@
 dup: 1
 ppoll: 1
 mmap2: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED). TODO: Once minijail
+# parser support for '<' is in, this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
 munmap: 1
 mprotect: 1
 madvise: 1
@@ -35,6 +43,7 @@
 sched_yield: 1
 nanosleep: 1
 lseek: 1
+_llseek: 1
 sched_get_priority_max: 1
 sched_get_priority_min: 1
 statfs64: 1
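The `mremap: arg3 == 3` rule above only admits calls whose flags argument (the fourth syscall argument, arg3 when zero-indexed) is exactly MREMAP_MAYMOVE | MREMAP_FIXED. A tiny check of that arithmetic, using the values from the Linux uapi headers where MREMAP_MAYMOVE is 1 and MREMAP_FIXED is 2 (may require -D_GNU_SOURCE depending on the libc):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
        // 1 | 2 == 3, the only value the policy lets through as arg3.
        printf("MREMAP_MAYMOVE | MREMAP_FIXED = %d\n",
               MREMAP_MAYMOVE | MREMAP_FIXED);
        return 0;
    }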
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 73109e1..2bf2201 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -16,7 +16,6 @@
 
 include $(CLEAR_VARS)
 
-
 LOCAL_SRC_FILES:= \
     MediaDrmService.cpp \
     main_mediadrmserver.cpp
@@ -25,9 +24,19 @@
     libbinder \
     liblog \
     libmediadrm \
-    libutils \
+    libutils
+ifeq ($(ENABLE_TREBLE), true)
+LOCAL_SHARED_LIBRARIES += \
+    libhidlbase \
+    libhidlmemory \
+    android.hidl.base@1.0 \
+    android.hardware.drm@1.0
+endif
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
+ifeq ($(ENABLE_TREBLE), true)
+LOCAL_CFLAGS += -DENABLE_TREBLE=1
+endif
 
 LOCAL_MODULE:= mediadrmserver
 LOCAL_32_BIT_ONLY := true
diff --git a/services/mediadrm/MediaDrmService.cpp b/services/mediadrm/MediaDrmService.cpp
index 331c568..e579dd8 100644
--- a/services/mediadrm/MediaDrmService.cpp
+++ b/services/mediadrm/MediaDrmService.cpp
@@ -21,11 +21,16 @@
 #define LOG_TAG "MediaDrmService"
 
 #include "MediaDrmService.h"
-
 #include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+#ifdef ENABLE_TREBLE
+#include <media/CryptoHal.h>
+#include <media/DrmHal.h>
+#else
 #include <media/Crypto.h>
 #include <media/Drm.h>
-#include <utils/Log.h>
+#endif
 
 namespace android {
 
@@ -35,11 +40,19 @@
 }
 
 sp<ICrypto> MediaDrmService::makeCrypto() {
+#ifdef ENABLE_TREBLE
+    return new CryptoHal;
+#else
     return new Crypto;
+#endif
 }
 
 sp<IDrm> MediaDrmService::makeDrm() {
+#ifdef ENABLE_TREBLE
+    return new DrmHal;
+#else
     return new Drm;
+#endif
 }
 
 } // namespace android
diff --git a/services/mediaextractor/minijail/minijail.cpp b/services/mediaextractor/minijail/minijail.cpp
index 8291633..c44d00d 100644
--- a/services/mediaextractor/minijail/minijail.cpp
+++ b/services/mediaextractor/minijail/minijail.cpp
@@ -19,7 +19,8 @@
 
 #include <unistd.h>
 
-#include <android/log.h>
+#include <log/log.h>
+
 #include <libminijail.h>
 
 #include "minijail.h"
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 7346f51..78bb587 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -34,6 +34,31 @@
 
 namespace android {
 
+namespace {
+
+class DeathNotifier : public IBinder::DeathRecipient {
+public:
+    DeathNotifier(const wp<ResourceManagerService> &service, int pid, int64_t clientId)
+        : mService(service), mPid(pid), mClientId(clientId) {}
+
+    virtual void binderDied(const wp<IBinder> & /* who */) override {
+        // Don't check for pid validity since we know it's already dead.
+        sp<ResourceManagerService> service = mService.promote();
+        if (service == nullptr) {
+            ALOGW("ResourceManagerService is dead as well.");
+            return;
+        }
+        service->removeResource(mPid, mClientId, false);
+    }
+
+private:
+    wp<ResourceManagerService> mService;
+    int mPid;
+    int64_t mClientId;
+};
+
+}  // namespace
+
 template <typename T>
 static String8 getString(const Vector<T> &items) {
     String8 itemsStr;
@@ -214,17 +239,25 @@
     ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
     // TODO: do the merge instead of append.
     info.resources.appendVector(resources);
+    if (info.deathNotifier == nullptr) {
+        info.deathNotifier = new DeathNotifier(this, pid, clientId);
+        IInterface::asBinder(client)->linkToDeath(info.deathNotifier);
+    }
     notifyResourceGranted(pid, resources);
 }
 
 void ResourceManagerService::removeResource(int pid, int64_t clientId) {
+    removeResource(pid, clientId, true);
+}
+
+void ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) {
     String8 log = String8::format(
             "removeResource(pid %d, clientId %lld)",
             pid, (long long) clientId);
     mServiceLog->add(log);
 
     Mutex::Autolock lock(mLock);
-    if (!mProcessInfo->isValidPid(pid)) {
+    if (checkValid && !mProcessInfo->isValidPid(pid)) {
         ALOGE("Rejected removeResource call with invalid pid.");
         return;
     }
@@ -237,6 +270,7 @@
     ResourceInfos &infos = mMap.editValueAt(index);
     for (size_t j = 0; j < infos.size(); ++j) {
         if (infos[j].clientId == clientId) {
+            IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier);
             j = infos.removeAt(j);
             found = true;
             break;
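The DeathNotifier added above follows the standard binder death-link pattern: link when a client first registers resources, unlink when the resources are explicitly removed, and treat binderDied() as an implicit removeResource() that skips pid validation because the process is already gone. A condensed sketch of that pattern, assuming an arbitrary remote binder and recipient (hypothetical helper names):

    #include <binder/IBinder.h>
    #include <utils/StrongPointer.h>

    using android::IBinder;
    using android::sp;

    // 'binder' is the remote client's binder, 'recipient' our DeathRecipient.
    void linkClient(const sp<IBinder>& binder,
                    const sp<IBinder::DeathRecipient>& recipient) {
        // From now on, recipient->binderDied() fires if the client process dies.
        binder->linkToDeath(recipient);
    }

    void unlinkClient(const sp<IBinder>& binder,
                      const sp<IBinder::DeathRecipient>& recipient) {
        // Must be undone on normal removal, otherwise the recipient lingers
        // and may fire after the resources were already cleaned up.
        binder->unlinkToDeath(recipient);
    }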
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 2a4a6b2..9e97ac0 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -36,6 +36,7 @@
 struct ResourceInfo {
     int64_t clientId;
     sp<IResourceManagerClient> client;
+    sp<IBinder::DeathRecipient> deathNotifier;
     Vector<MediaResource> resources;
 };
 
@@ -70,6 +71,8 @@
     // Returns true if any resource has been reclaimed, otherwise returns false.
     virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
 
+    void removeResource(int pid, int64_t clientId, bool checkValid);
+
 protected:
     virtual ~ResourceManagerService();
 
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
new file mode 100644
index 0000000..dfa9753
--- /dev/null
+++ b/services/oboeservice/AAudioService.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "HandleTracker.h"
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AAudioService.h"
+#include "AAudioServiceStreamFakeHal.h"
+
+using namespace android;
+using namespace aaudio;
+
+typedef enum
+{
+    AAUDIO_HANDLE_TYPE_DUMMY1, // TODO remove DUMMYs
+    AAUDIO_HANDLE_TYPE_DUMMY2, // make server handles different from client handles
+    AAUDIO_HANDLE_TYPE_STREAM,
+    AAUDIO_HANDLE_TYPE_COUNT
+} aaudio_service_handle_type_t;
+static_assert(AAUDIO_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+android::AAudioService::AAudioService()
+    : BnAAudioService() {
+}
+
+AAudioService::~AAudioService() {
+}
+
+aaudio_handle_t AAudioService::openStream(aaudio::AAudioStreamRequest &request,
+                                                aaudio::AAudioStreamConfiguration &configuration) {
+    AAudioServiceStreamBase *serviceStream =  new AAudioServiceStreamFakeHal();
+    ALOGD("AAudioService::openStream(): created serviceStream = %p", serviceStream);
+    aaudio_result_t result = serviceStream->open(request, configuration);
+    if (result < 0) {
+        ALOGE("AAudioService::openStream(): open returned %d", result);
+        return result;
+    } else {
+        AAudioStream handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
+        ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
+        if (handle < 0) {
+            delete serviceStream;
+        }
+        return handle;
+    }
+}
+
+aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+    AAudioServiceStreamBase *serviceStream = (AAudioServiceStreamBase *)
+            mHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM,
+                                  streamHandle);
+    ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
+    if (serviceStream != nullptr) {
+        ALOGD("AAudioService::closeStream(): deleting serviceStream = %p", serviceStream);
+        delete serviceStream;
+        return AAUDIO_OK;
+    }
+    return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAudioServiceStreamBase *AAudioService::convertHandleToServiceStream(
+        aaudio_handle_t streamHandle) const {
+    return (AAudioServiceStreamBase *) mHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
+                              (aaudio_handle_t)streamHandle);
+}
+
+aaudio_result_t AAudioService::getStreamDescription(
+                aaudio_handle_t streamHandle,
+                aaudio::AudioEndpointParcelable &parcelable) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGD("AAudioService::getStreamDescription(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    return serviceStream->getDescription(parcelable);
+}
+
+aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGD("AAudioService::startStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    aaudio_result_t result = serviceStream->start();
+    return result;
+}
+
+aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGD("AAudioService::pauseStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    aaudio_result_t result = serviceStream->pause();
+    return result;
+}
+
+aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGD("AAudioService::flushStream(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    return serviceStream->flush();
+}
+
+aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
+                                                         pid_t clientThreadId,
+                                                         aaudio_nanoseconds_t periodNanoseconds) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGD("AAudioService::registerAudioThread(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        ALOGE("AAudioService::registerAudioThread(), serviceStream == nullptr");
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
+        ALOGE("AAudioService::registerAudioThread(), thread already registered");
+        return AAUDIO_ERROR_INVALID_ORDER;
+    }
+    serviceStream->setRegisteredThread(clientThreadId);
+    // Boost client thread to SCHED_FIFO
+    struct sched_param sp;
+    memset(&sp, 0, sizeof(sp));
+    sp.sched_priority = 2; // TODO use 'requestPriority' function from frameworks/av/media/utils
+    int err = sched_setscheduler(clientThreadId, SCHED_FIFO, &sp);
+    if (err != 0){
+        ALOGE("AAudioService::sched_setscheduler() failed, errno = %d, priority = %d",
+              errno, sp.sched_priority);
+        return AAUDIO_ERROR_INTERNAL;
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
+                                                           pid_t clientThreadId) {
+    AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+    ALOGI("AAudioService::unregisterAudioThread(), serviceStream = %p", serviceStream);
+    if (serviceStream == nullptr) {
+        ALOGE("AAudioService::unregisterAudioThread(), serviceStream == nullptr");
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    if (serviceStream->getRegisteredThread() != clientThreadId) {
+        ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+    serviceStream->setRegisteredThread(0);
+    return AAUDIO_OK;
+}
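The service never hands raw pointers across binder: openStream() converts each AAudioServiceStreamBase* into an opaque handle via mHandleTracker, and every later call validates that handle before touching the stream. A self-contained sketch of the same lookup pattern (hypothetical names, a plain std::map standing in for HandleTracker):

    #include <cstdint>
    #include <map>

    using handle_t = int32_t;

    class HandleMapSketch {
    public:
        // Store a pointer and hand out an opaque, positive handle.
        handle_t put(void* ptr) {
            handle_t handle = mNext++;
            mMap[handle] = ptr;
            return handle;
        }

        // Returns nullptr for stale or made-up handles, so the caller can
        // report an invalid-handle error instead of dereferencing garbage.
        void* get(handle_t handle) const {
            auto it = mMap.find(handle);
            return (it == mMap.end()) ? nullptr : it->second;
        }

        // Remove and return the pointer (the caller deletes it), as closeStream() does.
        void* remove(handle_t handle) {
            void* ptr = get(handle);
            mMap.erase(handle);
            return ptr;
        }

    private:
        handle_t mNext = 1;   // 0 and negative values reserved for errors
        std::map<handle_t, void*> mMap;
    };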
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
new file mode 100644
index 0000000..e9625b2
--- /dev/null
+++ b/services/oboeservice/AAudioService.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_AUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_AUDIO_SERVICE_H
+
+#include <time.h>
+#include <pthread.h>
+
+#include <binder/BinderService.h>
+
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+#include "utility/HandleTracker.h"
+#include "IAAudioService.h"
+#include "AAudioServiceStreamBase.h"
+
+namespace android {
+
+class AAudioService :
+    public BinderService<AAudioService>,
+    public BnAAudioService
+{
+    friend class BinderService<AAudioService>;
+
+public:
+    AAudioService();
+    virtual ~AAudioService();
+
+    static const char* getServiceName() { return "media.audio_aaudio"; }
+
+    virtual aaudio_handle_t openStream(aaudio::AAudioStreamRequest &request,
+                                     aaudio::AAudioStreamConfiguration &configuration);
+
+    virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle);
+
+    virtual aaudio_result_t getStreamDescription(
+                aaudio_handle_t streamHandle,
+                aaudio::AudioEndpointParcelable &parcelable);
+
+    virtual aaudio_result_t startStream(aaudio_handle_t streamHandle);
+
+    virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle);
+
+    virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
+
+    virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+                                              pid_t pid, aaudio_nanoseconds_t periodNanoseconds) ;
+
+    virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle, pid_t pid);
+
+private:
+
+    aaudio::AAudioServiceStreamBase *convertHandleToServiceStream(aaudio_handle_t streamHandle) const;
+
+    HandleTracker mHandleTracker;
+
+};
+
+} /* namespace android */
+
+#endif //AAUDIO_AAUDIO_AUDIO_SERVICE_H
diff --git a/services/oboeservice/AAudioServiceDefinitions.h b/services/oboeservice/AAudioServiceDefinitions.h
new file mode 100644
index 0000000..ee9aaa7
--- /dev/null
+++ b/services/oboeservice/AAudioServiceDefinitions.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_SERVICE_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/RingBufferParcelable.h"
+
+namespace aaudio {
+
+// TODO move this to an "include" folder for the service.
+
+struct AAudioMessageTimestamp {
+    aaudio_position_frames_t position;
+    int64_t                deviceOffset; // add to client position to get device position
+    aaudio_nanoseconds_t     timestamp;
+};
+
+typedef enum aaudio_service_event_e : uint32_t {
+    AAUDIO_SERVICE_EVENT_STARTED,
+    AAUDIO_SERVICE_EVENT_PAUSED,
+    AAUDIO_SERVICE_EVENT_FLUSHED,
+    AAUDIO_SERVICE_EVENT_CLOSED,
+    AAUDIO_SERVICE_EVENT_DISCONNECTED
+} aaudio_service_event_t;
+
+struct AAudioMessageEvent {
+    aaudio_service_event_t event;
+    int32_t data1;
+    int64_t data2;
+};
+
+typedef struct AAudioServiceMessage_s {
+    enum class code : uint32_t {
+        NOTHING,
+        TIMESTAMP,
+        EVENT,
+    };
+
+    code what;
+    union {
+        AAudioMessageTimestamp timestamp;
+        AAudioMessageEvent event;
+    };
+} AAudioServiceMessage;
+
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_H
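AAudioServiceMessage is a small tagged union: 'what' selects which member of the anonymous union is valid. An illustrative producer/consumer fragment against the definitions above (sketch only; assumes the header is on the include path):

    #include "AAudioServiceDefinitions.h"

    using namespace aaudio;

    // Producer side: fill in only the member that matches 'what'.
    AAudioServiceMessage makeStartedEvent() {
        AAudioServiceMessage msg;
        msg.what = AAudioServiceMessage::code::EVENT;
        msg.event.event = AAUDIO_SERVICE_EVENT_STARTED;
        msg.event.data1 = 0;
        msg.event.data2 = 0;
        return msg;
    }

    // Consumer side: switch on 'what' before touching the union.
    void handleMessage(const AAudioServiceMessage& msg) {
        switch (msg.what) {
            case AAudioServiceMessage::code::TIMESTAMP:
                // msg.timestamp.position / msg.timestamp.timestamp are valid here
                break;
            case AAudioServiceMessage::code::EVENT:
                // msg.event.event / data1 / data2 are valid here
                break;
            default:
                break;
        }
    }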
diff --git a/services/oboeservice/AAudioServiceMain.cpp b/services/oboeservice/AAudioServiceMain.cpp
new file mode 100644
index 0000000..aa89180
--- /dev/null
+++ b/services/oboeservice/AAudioServiceMain.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <math.h>
+
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+
+#include <binder/IInterface.h>
+#include <binder/IBinder.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <binder/IPCThreadState.h>
+
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include "AAudioServiceDefinitions.h"
+#include "IAAudioService.h"
+#include "AAudioService.h"
+
+using namespace android;
+using namespace aaudio;
+
+/**
+ * This is used to test the AAudioService as a standalone application.
+ * It is not used when the AAudioService is integrated with AudioFlinger.
+ */
+int main(int argc, char **argv) {
+    printf("Test AAudioService %s\n", argv[1]);
+    ALOGD("This is the AAudioService");
+
+    defaultServiceManager()->addService(String16("AAudioService"), new AAudioService());
+    android::ProcessState::self()->startThreadPool();
+    printf("AAudioService service is now ready\n");
+    IPCThreadState::self()->joinThreadPool();
+    printf("AAudioService service thread joined\n");
+
+    return 0;
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
new file mode 100644
index 0000000..a7938dc
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceStreamBase.h"
+#include "AudioEndpointParcelable.h"
+
+using namespace android;
+using namespace aaudio;
+
+/**
+ * Construct the AudioCommandQueues and the AudioDataQueue
+ * and fill in the endpoint parcelable.
+ */
+
+AAudioServiceStreamBase::AAudioServiceStreamBase()
+        : mUpMessageQueue(nullptr)
+{
+    // TODO could fail so move out of constructor
+    mUpMessageQueue = new SharedRingBuffer();
+    mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+}
+
+AAudioServiceStreamBase::~AAudioServiceStreamBase() {
+    Mutex::Autolock _l(mLockUpMessageQueue);
+    delete mUpMessageQueue;
+}
+
+void AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
+                              int32_t data1,
+                              int64_t data2) {
+
+    Mutex::Autolock _l(mLockUpMessageQueue);
+    AAudioServiceMessage command;
+    command.what = AAudioServiceMessage::code::EVENT;
+    command.event.event = event;
+    command.event.data1 = data1;
+    command.event.data2 = data2;
+    mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+}
+
+
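The base class only ever writes to mUpMessageQueue; the client side drains it through the shared memory described by the endpoint parcelable. A hypothetical drain loop, assuming the service headers above are available and that FifoBuffer exposes a read() counterpart to the write() used here (an assumption, not confirmed by this patch):

    #include "AAudioServiceDefinitions.h"
    #include "SharedRingBuffer.h"

    using namespace aaudio;

    // Hypothetical consumer-side drain loop (sketch only).
    void drainUpQueue(SharedRingBuffer* upQueue) {
        AAudioServiceMessage msg;
        // read() is assumed to return the number of messages actually read.
        while (upQueue->getFifoBuffer()->read(&msg, 1) == 1) {
            // dispatch on msg.what, e.g. as in the handleMessage() sketch earlier
        }
    }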
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
new file mode 100644
index 0000000..4a59253
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+
+#include <utils/Mutex.h>
+
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "fifo/FifoBuffer.h"
+#include "SharedRingBuffer.h"
+#include "AudioEndpointParcelable.h"
+#include "AAudioThread.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class AAudioServiceStreamBase {
+
+public:
+    AAudioServiceStreamBase();
+    virtual ~AAudioServiceStreamBase();
+
+    enum {
+        ILLEGAL_THREAD_ID = 0
+    };
+
+    /**
+     * Fill in a parcelable description of stream.
+     */
+    virtual aaudio_result_t getDescription(aaudio::AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Open the device.
+     */
+    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
+                               aaudio::AAudioStreamConfiguration &configuration) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual aaudio_result_t start() = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume with loss of data.
+     */
+    virtual aaudio_result_t pause() = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual aaudio_result_t flush() = 0;
+
+    virtual aaudio_result_t close() = 0;
+
+    virtual void sendCurrentTimestamp() = 0;
+
+    aaudio_size_frames_t getFramesPerBurst() {
+        return mFramesPerBurst;
+    }
+
+    virtual void sendServiceEvent(aaudio_service_event_t event,
+                                  int32_t data1 = 0,
+                                  int64_t data2 = 0);
+
+    virtual void setRegisteredThread(pid_t pid) {
+        mRegisteredClientThread = pid;
+    }
+
+    virtual pid_t getRegisteredThread() {
+        return mRegisteredClientThread;
+    }
+
+protected:
+
+    pid_t                    mRegisteredClientThread = ILLEGAL_THREAD_ID;
+
+    SharedRingBuffer *       mUpMessageQueue;
+
+    aaudio_sample_rate_t       mSampleRate = 0;
+    aaudio_size_bytes_t        mBytesPerFrame = 0;
+    aaudio_size_frames_t       mFramesPerBurst = 0;
+    aaudio_size_frames_t       mCapacityInFrames = 0;
+    aaudio_size_bytes_t        mCapacityInBytes = 0;
+
+    android::Mutex           mLockUpMessageQueue;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.cpp b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
new file mode 100644
index 0000000..627a504
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamFakeHal.h"
+
+#include "FakeAudioHal.h"
+
+using namespace android;
+using namespace aaudio;
+
+// HACK values for Marlin
+#define CARD_ID              0
+#define DEVICE_ID            19
+
+/**
+ * Construct the audio message queues.
+ */
+
+AAudioServiceStreamFakeHal::AAudioServiceStreamFakeHal()
+        : AAudioServiceStreamBase()
+        , mStreamId(nullptr)
+        , mPreviousFrameCounter(0)
+        , mAAudioThread()
+{
+}
+
+AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() {
+    ALOGD("AAudioServiceStreamFakeHal::~AAudioServiceStreamFakeHal() call close()");
+    close();
+}
+
+aaudio_result_t AAudioServiceStreamFakeHal::open(aaudio::AAudioStreamRequest &request,
+                                             aaudio::AAudioStreamConfiguration &configuration) {
+    // Open stream on HAL and pass information about the ring buffer to the client.
+    mmap_buffer_info mmapInfo;
+    aaudio_result_t error;
+
+    // Open HAL
+    error = fake_hal_open(CARD_ID, DEVICE_ID, &mStreamId);
+    if(error < 0) {
+        ALOGE("Could not open card %d, device %d", CARD_ID, DEVICE_ID);
+        return error;
+    }
+
+    // Get information about the shared audio buffer.
+    error = fake_hal_get_mmap_info(mStreamId, &mmapInfo);
+    if (error < 0) {
+        ALOGE("fake_hal_get_mmap_info returned %d", error);
+        fake_hal_close(mStreamId);
+        mStreamId = nullptr;
+        return error;
+    }
+    mHalFileDescriptor = mmapInfo.fd;
+    mFramesPerBurst = mmapInfo.burst_size_in_frames;
+    mCapacityInFrames = mmapInfo.buffer_capacity_in_frames;
+    mCapacityInBytes = mmapInfo.buffer_capacity_in_bytes;
+    mSampleRate = mmapInfo.sample_rate;
+    mBytesPerFrame = mmapInfo.channel_count * sizeof(int16_t); // FIXME based on data format
+    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.burst_size_in_frames = %d",
+         mmapInfo.burst_size_in_frames);
+    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_frames = %d",
+         mmapInfo.buffer_capacity_in_frames);
+    ALOGD("AAudioServiceStreamFakeHal::open() mmapInfo.buffer_capacity_in_bytes = %d",
+         mmapInfo.buffer_capacity_in_bytes);
+
+    // Fill in AAudioStreamConfiguration
+    configuration.setSampleRate(mSampleRate);
+    configuration.setSamplesPerFrame(mmapInfo.channel_count);
+    configuration.setAudioFormat(AAUDIO_FORMAT_PCM_I16);
+
+    return AAUDIO_OK;
+}
+
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::getDescription(AudioEndpointParcelable &parcelable) {
+    // Gather information on the message queue.
+    mUpMessageQueue->fillParcelable(parcelable,
+                                    parcelable.mUpMessageQueueParcelable);
+
+    // Gather information on the data queue.
+    // TODO refactor into a SharedRingBuffer?
+    int fdIndex = parcelable.addFileDescriptor(mHalFileDescriptor, mCapacityInBytes);
+    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, mCapacityInBytes);
+    parcelable.mDownDataQueueParcelable.setBytesPerFrame(mBytesPerFrame);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+    return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of data.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::start() {
+    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+    aaudio_result_t result = fake_hal_start(mStreamId);
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+    mState = AAUDIO_STREAM_STATE_STARTED;
+    if (result == AAUDIO_OK) {
+        mThreadEnabled.store(true);
+        result = mAAudioThread.start(this);
+    }
+    return result;
+}
+
+/**
+ * Stop the flow of data such that start() can resume with loss of data.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::pause() {
+    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+    sendCurrentTimestamp();
+    aaudio_result_t result = fake_hal_pause(mStreamId);
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
+    mState = AAUDIO_STREAM_STATE_PAUSED;
+    mFramesRead.reset32();
+    ALOGD("AAudioServiceStreamFakeHal::pause() sent AAUDIO_SERVICE_EVENT_PAUSED");
+    mThreadEnabled.store(false);
+    result = mAAudioThread.stop();
+    return result;
+}
+
+/**
+ *  Discard any data held by the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamFakeHal::flush() {
+    if (mStreamId == nullptr) return AAUDIO_ERROR_NULL;
+    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+    ALOGD("AAudioServiceStreamFakeHal::pause() send AAUDIO_SERVICE_EVENT_FLUSHED");
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+    mState = AAUDIO_STREAM_STATE_FLUSHED;
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamFakeHal::close() {
+    aaudio_result_t result = AAUDIO_OK;
+    if (mStreamId != nullptr) {
+        result = fake_hal_close(mStreamId);
+        mStreamId = nullptr;
+    }
+    return result;
+}
+
+void AAudioServiceStreamFakeHal::sendCurrentTimestamp() {
+    int frameCounter = 0;
+    int error = fake_hal_get_frame_counter(mStreamId, &frameCounter);
+    if (error < 0) {
+        ALOGE("AAudioServiceStreamFakeHal::sendCurrentTimestamp() error %d",
+                error);
+    } else if (frameCounter != mPreviousFrameCounter) {
+        AAudioServiceMessage command;
+        command.what = AAudioServiceMessage::code::TIMESTAMP;
+        mFramesRead.update32(frameCounter);
+        command.timestamp.position = mFramesRead.get();
+        ALOGD("AAudioServiceStreamFakeHal::sendCurrentTimestamp() HAL frames = %d, pos = %d",
+                frameCounter, (int)mFramesRead.get());
+        command.timestamp.timestamp = AudioClock::getNanoseconds();
+        mUpMessageQueue->getFifoBuffer()->write(&command, 1);
+        mPreviousFrameCounter = frameCounter;
+    }
+}
+
+// implement Runnable
+void AAudioServiceStreamFakeHal::run() {
+    TimestampScheduler timestampScheduler;
+    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+    timestampScheduler.start(AudioClock::getNanoseconds());
+    while(mThreadEnabled.load()) {
+        aaudio_nanoseconds_t nextTime = timestampScheduler.nextAbsoluteTime();
+        if (AudioClock::getNanoseconds() >= nextTime) {
+            sendCurrentTimestamp();
+        } else  {
+            // Sleep until it is time to send the next timestamp.
+            AudioClock::sleepUntilNanoTime(nextTime);
+        }
+    }
+}
+
diff --git a/services/oboeservice/AAudioServiceStreamFakeHal.h b/services/oboeservice/AAudioServiceStreamFakeHal.h
new file mode 100644
index 0000000..170d0ee
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamFakeHal.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
+
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceStreamBase.h"
+#include "FakeAudioHal.h"
+#include "MonotonicCounter.h"
+#include "AudioEndpointParcelable.h"
+#include "TimestampScheduler.h"
+
+namespace aaudio {
+
+class AAudioServiceStreamFakeHal
+    : public AAudioServiceStreamBase
+    , public Runnable {
+
+public:
+    AAudioServiceStreamFakeHal();
+    virtual ~AAudioServiceStreamFakeHal();
+
+    virtual aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) override;
+
+    virtual aaudio_result_t open(aaudio::AAudioStreamRequest &request,
+                               aaudio::AAudioStreamConfiguration &configuration) override;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual aaudio_result_t start() override;
+
+    /**
+     * Stop the flow of data such that start() can resume with loss of data.
+     */
+    virtual aaudio_result_t pause() override;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual aaudio_result_t flush() override;
+
+    virtual aaudio_result_t close() override;
+
+    void sendCurrentTimestamp();
+
+    virtual void run() override; // to implement Runnable
+
+private:
+    fake_hal_stream_ptr    mStreamId; // Move to HAL
+
+    MonotonicCounter       mFramesWritten;
+    MonotonicCounter       mFramesRead;
+    int                    mHalFileDescriptor = -1;
+    int                    mPreviousFrameCounter = 0;   // from HAL
+
+    aaudio_stream_state_t    mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+
+    AAudioThread             mAAudioThread;
+    std::atomic<bool>      mThreadEnabled;
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_FAKE_HAL_H
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
new file mode 100644
index 0000000..f5e5784
--- /dev/null
+++ b/services/oboeservice/AAudioThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+#include "AAudioThread.h"
+
+using namespace aaudio;
+
+
+AAudioThread::AAudioThread() {
+    // mThread is a pthread_t of unknown size so we need memset.
+    memset(&mThread, 0, sizeof(mThread));
+}
+
+void AAudioThread::dispatch() {
+    if (mRunnable != nullptr) {
+        mRunnable->run();
+    } else {
+        run();
+    }
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void * AAudioThread_internalThreadProc(void *arg) {
+    AAudioThread *aaudioThread = (AAudioThread *) arg;
+    aaudioThread->dispatch();
+    return nullptr;
+}
+
+aaudio_result_t AAudioThread::start(Runnable *runnable) {
+    if (mHasThread) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    mRunnable = runnable; // TODO use atomic?
+    int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
+    if (err != 0) {
+        ALOGE("AAudioThread::pthread_create() returned %d", err);
+        // TODO convert errno to aaudio_result_t
+        return AAUDIO_ERROR_INTERNAL;
+    } else {
+        mHasThread = true;
+        return AAUDIO_OK;
+    }
+}
+
+aaudio_result_t AAudioThread::stop() {
+    if (!mHasThread) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    int err = pthread_join(mThread, nullptr);
+    mHasThread = false;
+    // TODO convert errno to aaudio_result_t
+    return err ? AAUDIO_ERROR_INTERNAL : AAUDIO_OK;
+}
+
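AAudioThread is deliberately thin: start() spawns a pthread whose entry point calls dispatch(), which in turn runs either the supplied Runnable or the subclass's own run(). A minimal usage sketch against the classes added above; note that the run loop must watch a flag of its own, since stop() only joins and does not signal the thread to exit:

    #include <atomic>
    #include "AAudioThread.h"

    using namespace aaudio;

    class TickerSketch : public Runnable {
    public:
        void run() override {
            while (mEnabled.load()) {
                // periodic work goes here, e.g. sending a timestamp
            }
        }
        std::atomic<bool> mEnabled{true};
    };

    void example() {
        TickerSketch ticker;
        AAudioThread thread;
        thread.start(&ticker);        // spawns the pthread, which calls ticker.run()
        // ... later ...
        ticker.mEnabled.store(false); // ask the loop to exit
        thread.stop();                // pthread_join() on the worker
    }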
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
new file mode 100644
index 0000000..1f676dc
--- /dev/null
+++ b/services/oboeservice/AAudioThread.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_THREAD_H
+#define AAUDIO_THREAD_H
+
+#include <atomic>
+#include <pthread.h>
+
+#include <aaudio/AAudioDefinitions.h>
+
+namespace aaudio {
+
+class Runnable {
+public:
+    Runnable() {};
+    virtual ~Runnable() = default;
+
+    virtual void run() {}
+};
+
+/**
+ * Abstraction for a host thread.
+ */
+class AAudioThread
+{
+public:
+    AAudioThread();
+    AAudioThread(Runnable *runnable);
+    virtual ~AAudioThread() = default;
+
+    /**
+     * Start the thread running.
+     */
+    aaudio_result_t start(Runnable *runnable = nullptr);
+
+    /**
+     * Join the thread.
+     * The caller must somehow tell the thread to exit before calling join().
+     */
+    aaudio_result_t stop();
+
+    /**
+     * This will get called in the thread.
+     * Override this or pass a Runnable to start().
+     */
+    virtual void run() {};
+
+    void dispatch(); // called internally from 'C' thread wrapper
+
+private:
+    Runnable*                mRunnable = nullptr; // TODO make atomic with memory barrier?
+    bool                     mHasThread = false;
+    pthread_t                mThread; // initialized in constructor
+
+};
+
+} /* namespace aaudio */
+
+#endif ///AAUDIO_THREAD_H
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
new file mode 100644
index 0000000..68e43ed
--- /dev/null
+++ b/services/oboeservice/Android.mk
@@ -0,0 +1,54 @@
+LOCAL_PATH:= $(call my-dir)
+
+# AAudio Service
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := oboeservice
+LOCAL_MODULE_TAGS := optional
+
+LIBAAUDIO_DIR := ../../media/liboboe
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
+
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/native/include \
+    system/core/base/include \
+    $(TOP)/frameworks/native/media/liboboe/include/include \
+    $(TOP)/frameworks/av/media/liboboe/include \
+    frameworks/native/include \
+    $(TOP)/external/tinyalsa/include \
+    $(TOP)/frameworks/av/media/liboboe/src \
+    $(TOP)/frameworks/av/media/liboboe/src/binding \
+    $(TOP)/frameworks/av/media/liboboe/src/client \
+    $(TOP)/frameworks/av/media/liboboe/src/core \
+    $(TOP)/frameworks/av/media/liboboe/src/fifo \
+    $(TOP)/frameworks/av/media/liboboe/src/utility
+
+# TODO These could be in a liboboe_common library
+LOCAL_SRC_FILES += \
+    $(LIBAAUDIO_SRC_DIR)/utility/HandleTracker.cpp \
+    $(LIBAAUDIO_SRC_DIR)/utility/AAudioUtilities.cpp \
+    $(LIBAAUDIO_SRC_DIR)/fifo/FifoBuffer.cpp \
+    $(LIBAAUDIO_SRC_DIR)/fifo/FifoControllerBase.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/SharedMemoryParcelable.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/SharedRegionParcelable.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/RingBufferParcelable.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/AudioEndpointParcelable.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamRequest.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/AAudioStreamConfiguration.cpp \
+    $(LIBAAUDIO_SRC_DIR)/binding/IAAudioService.cpp \
+    SharedRingBuffer.cpp \
+    FakeAudioHal.cpp \
+    AAudioService.cpp \
+    AAudioServiceStreamBase.cpp \
+    AAudioServiceStreamFakeHal.cpp \
+    TimestampScheduler.cpp \
+    AAudioServiceMain.cpp \
+    AAudioThread.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES :=  libbinder libcutils libutils liblog libtinyalsa
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/oboeservice/FakeAudioHal.cpp b/services/oboeservice/FakeAudioHal.cpp
new file mode 100644
index 0000000..cf0dc86
--- /dev/null
+++ b/services/oboeservice/FakeAudioHal.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
+ */
+
+#include <iostream>
+#include <math.h>
+#include <limits>
+#include <string.h>
+#include <unistd.h>
+
+#define __force    // __force/__bitwise/__user are kernel sparse annotations; stub them out so <sound/asound.h> compiles in user space
+#define __bitwise
+#define __user
+#include <sound/asound.h>
+
+#include "tinyalsa/asoundlib.h"
+
+#include "FakeAudioHal.h"
+
+//using namespace aaudio;
+
+using sample_t = int16_t;
+using std::cout;
+using std::endl;
+
+#undef SNDRV_PCM_IOCTL_SYNC_PTR
+#define SNDRV_PCM_IOCTL_SYNC_PTR 0xc0884123
+#define PCM_ERROR_MAX 128
+
+const int SAMPLE_RATE = 48000;       // Hz
+const int CHANNEL_COUNT = 2;
+
+struct pcm {   // local mirror of tinyalsa's private struct pcm, so the fd, sync_ptr and mmap fields can be reached directly
+    int fd;
+    unsigned int flags;
+    int running:1;
+    int prepared:1;
+    int underruns;
+    unsigned int buffer_size;
+    unsigned int boundary;
+    char error[PCM_ERROR_MAX];
+    struct pcm_config config;
+    struct snd_pcm_mmap_status *mmap_status;
+    struct snd_pcm_mmap_control *mmap_control;
+    struct snd_pcm_sync_ptr *sync_ptr;
+    void *mmap_buffer;
+    unsigned int noirq_frames_per_msec;
+    int wait_for_avail_min;
+};
+
+static int pcm_sync_ptr(struct pcm *pcm, int flags) {
+    if (pcm->sync_ptr) {
+        pcm->sync_ptr->flags = flags;
+        if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_SYNC_PTR, pcm->sync_ptr) < 0)
+            return -1;
+    }
+    return 0;
+}
+
+int pcm_get_hw_ptr(struct pcm* pcm, unsigned int* hw_ptr) {
+    if (!hw_ptr || !pcm) return -EINVAL;
+
+    int result = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
+    if (!result) {
+        *hw_ptr = pcm->sync_ptr->s.status.hw_ptr;
+    }
+
+    return result;
+}
+
+typedef struct stream_tracker {
+    struct pcm * pcm;
+    int          framesPerBurst;
+    sample_t   * hwBuffer;
+    int32_t      capacityInFrames;
+    int32_t      capacityInBytes;
+} stream_tracker_t;
+
+#define FRAMES_PER_BURST_QUALCOMM 192
+#define FRAMES_PER_BURST_NVIDIA   128
+
+int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *streamPP) {
+    int framesPerBurst = FRAMES_PER_BURST_QUALCOMM; // TODO update as needed
+    int periodCount = 32;
+    unsigned int offset1;
+    unsigned int frames1;
+    void *area = nullptr;
+    int mmapAvail = 0;
+
+    // Configuration for an ALSA stream.
+    pcm_config cfg;
+    memset(&cfg, 0, sizeof(cfg));
+    cfg.channels = CHANNEL_COUNT;
+    cfg.format = PCM_FORMAT_S16_LE;
+    cfg.rate = SAMPLE_RATE;
+    cfg.period_count = periodCount;
+    cfg.period_size = framesPerBurst;
+    cfg.start_threshold = 0; // for NOIRQ mode the stream should just start; was framesPerBurst
+    cfg.stop_threshold = INT32_MAX;
+    cfg.silence_size = 0;
+    cfg.silence_threshold = 0;
+    cfg.avail_min = framesPerBurst;
+
+    stream_tracker_t *streamTracker = (stream_tracker_t *) malloc(sizeof(stream_tracker_t));
+    if (streamTracker == nullptr) {
+        return -1;
+    }
+    memset(streamTracker, 0, sizeof(stream_tracker_t));
+
+    streamTracker->pcm = pcm_open(card_id, device_id, PCM_OUT | PCM_MMAP | PCM_NOIRQ, &cfg);
+    if (streamTracker->pcm == nullptr) {
+        cout << "Could not open device." << endl;
+        free(streamTracker);
+        return -1;
+    }
+
+    streamTracker->framesPerBurst = cfg.period_size; // Get from ALSA
+    streamTracker->capacityInFrames = pcm_get_buffer_size(streamTracker->pcm);
+    streamTracker->capacityInBytes = pcm_frames_to_bytes(streamTracker->pcm, streamTracker->capacityInFrames);
+    std::cout << "fake_hal_open() streamTracker->framesPerBurst = " << streamTracker->framesPerBurst << std::endl;
+    std::cout << "fake_hal_open() streamTracker->capacityInFrames = " << streamTracker->capacityInFrames << std::endl;
+
+    if (!pcm_is_ready(streamTracker->pcm)) { // pcm_is_ready() returns 0 on failure, never a negative value
+        cout << "Device is not ready." << endl;
+        goto error;
+    }
+
+    if (pcm_prepare(streamTracker->pcm) < 0) {
+        cout << "Device could not be prepared." << endl;
+        cout << "For Marlin, please enter:" << endl;
+        cout << "   adb shell" << endl;
+        cout << "   tinymix \"QUAT_MI2S_RX Audio Mixer MultiMedia8\" 1" << endl;
+        goto error;
+    }
+    mmapAvail = pcm_mmap_avail(streamTracker->pcm);
+    if (mmapAvail <= 0) {
+        cout << "fake_hal_open() mmap_avail is <=0" << endl;
+        goto error;
+    }
+    cout << "fake_hal_open() mmap_avail = " << mmapAvail << endl;
+
+    // Where is the memory mapped area?
+    if (pcm_mmap_begin(streamTracker->pcm, &area, &offset1, &frames1) < 0)  {
+        cout << "fake_hal_open() pcm_mmap_begin failed" << endl;
+        goto error;
+    }
+
+    // Clear the buffer.
+    memset((sample_t*) area, 0, streamTracker->capacityInBytes);
+    streamTracker->hwBuffer = (sample_t*) area;
+    streamTracker->hwBuffer[0] = 32000; // impulse
+
+    // Prime the buffer so it can start.
+    if (pcm_mmap_commit(streamTracker->pcm, 0, framesPerBurst) < 0) {
+        cout << "fake_hal_open() pcm_mmap_commit failed" << endl;
+        goto error;
+    }
+
+    *streamPP = streamTracker;
+    return 1;
+
+error:
+    fake_hal_close(streamTracker);
+    return -1;
+}
+
+int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    info->fd = streamTracker->pcm->fd; // TODO use tinyalsa function
+    info->hw_buffer = streamTracker->hwBuffer;
+    info->burst_size_in_frames = streamTracker->framesPerBurst;
+    info->buffer_capacity_in_frames = streamTracker->capacityInFrames;
+    info->buffer_capacity_in_bytes = streamTracker->capacityInBytes;
+    info->sample_rate = SAMPLE_RATE;
+    info->channel_count = CHANNEL_COUNT;
+    return 0;
+}
+
+int fake_hal_start(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_start(streamTracker->pcm) < 0) {
+        cout << "fake_hal_start failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_pause(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_stop(streamTracker->pcm) < 0) {
+        cout << "fake_hal_stop failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    if (pcm_get_hw_ptr(streamTracker->pcm, (unsigned int *)frame_counter) < 0) {
+        cout << "fake_hal_get_frame_counter failed" << endl;
+        return -1;
+    }
+    return 0;
+}
+
+int fake_hal_close(fake_hal_stream_ptr stream) {
+    stream_tracker_t *streamTracker = (stream_tracker_t *) stream;
+    pcm_close(streamTracker->pcm);
+    free(streamTracker);
+    return 0;
+}
+
diff --git a/services/oboeservice/FakeAudioHal.h b/services/oboeservice/FakeAudioHal.h
new file mode 100644
index 0000000..f47ccd9
--- /dev/null
+++ b/services/oboeservice/FakeAudioHal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Simple fake HAL that supports ALSA MMAP/NOIRQ mode.
+ */
+
+#ifndef FAKE_AUDIO_HAL_H
+#define FAKE_AUDIO_HAL_H
+
+//namespace aaudio {
+
+using sample_t = int16_t;
+struct mmap_buffer_info {
+    int       fd;
+    int32_t   burst_size_in_frames;
+    int32_t   buffer_capacity_in_frames;
+    int32_t   buffer_capacity_in_bytes;
+    int32_t   sample_rate;
+    int32_t   channel_count;
+    sample_t *hw_buffer;
+};
+
+typedef void *fake_hal_stream_ptr;
+
+//extern "C"
+//{
+
+int fake_hal_open(int card_id, int device_id, fake_hal_stream_ptr *stream_pp);
+
+int fake_hal_get_mmap_info(fake_hal_stream_ptr stream, mmap_buffer_info *info);
+
+int fake_hal_start(fake_hal_stream_ptr stream);
+
+int fake_hal_pause(fake_hal_stream_ptr stream);
+
+int fake_hal_get_frame_counter(fake_hal_stream_ptr stream, int *frame_counter);
+
+int fake_hal_close(fake_hal_stream_ptr stream);
+
+//} /* "C" */
+
+//} /* namespace aaudio */
+
+#endif // FAKE_AUDIO_HAL_H
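A rough sketch of the intended call sequence for this fake HAL; the card and device ids below are placeholders for a real MMAP-capable PCM device, and error handling is trimmed:

#include "FakeAudioHal.h"

// Illustrative only; a real caller would pick the card/device of an ALSA MMAP/NOIRQ output.
int exampleUseFakeHal() {
    fake_hal_stream_ptr stream = nullptr;
    if (fake_hal_open(0 /*card*/, 0 /*device*/, &stream) < 0) return -1;

    mmap_buffer_info info;
    fake_hal_get_mmap_info(stream, &info);       // fd, hw buffer, burst size, capacity, rate

    fake_hal_start(stream);                      // start the DMA
    int frames = 0;
    fake_hal_get_frame_counter(stream, &frames); // poll the hardware read pointer
    fake_hal_pause(stream);
    fake_hal_close(stream);
    return 0;
}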
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
new file mode 100644
index 0000000..9ac8fdf
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+
+//#include "AAudioServiceStreamBase.h"
+//#include "AAudioServiceStreamFakeHal.h"
+
+#include "SharedRingBuffer.h"
+
+using namespace android;
+using namespace aaudio;
+
+SharedRingBuffer::~SharedRingBuffer()
+{
+    if (mSharedMemory != nullptr) {
+        delete mFifoBuffer;
+        munmap(mSharedMemory, mSharedMemorySizeInBytes);
+        close(mFileDescriptor);
+        mSharedMemory = nullptr;
+    }
+}
+
+aaudio_result_t SharedRingBuffer::allocate(fifo_frames_t   bytesPerFrame,
+                                         fifo_frames_t   capacityInFrames) {
+    mCapacityInFrames = capacityInFrames;
+
+    // Create shared memory large enough to hold the data and the read and write counters.
+    mDataMemorySizeInBytes = bytesPerFrame * capacityInFrames;
+    mSharedMemorySizeInBytes = mDataMemorySizeInBytes + (2 * (sizeof(fifo_counter_t)));
+    mFileDescriptor = ashmem_create_region("AAudioSharedRingBuffer", mSharedMemorySizeInBytes);
+    if (mFileDescriptor < 0) {
+        ALOGE("SharedRingBuffer::allocate() ashmem_create_region() failed %d", errno);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    int err = ashmem_set_prot_region(mFileDescriptor, PROT_READ|PROT_WRITE); // TODO error handling?
+    if (err < 0) {
+        ALOGE("SharedRingBuffer::allocate() ashmem_set_prot_region() failed %d", errno);
+        close(mFileDescriptor);
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Map the fd to memory addresses.
+    mSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+                         PROT_READ|PROT_WRITE,
+                         MAP_SHARED,
+                         mFileDescriptor, 0);
+    if (mSharedMemory == MAP_FAILED) {
+        ALOGE("SharedRingBuffer::allocate() mmap() failed %d", errno);
+        close(mFileDescriptor);
+        return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+    }
+
+    // Get addresses for our counters and data from the shared memory.
+    fifo_counter_t *readCounterAddress =
+            (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_READ_OFFSET];
+    fifo_counter_t *writeCounterAddress =
+            (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
+    uint8_t *dataAddress = &mSharedMemory[SHARED_RINGBUFFER_DATA_OFFSET];
+
+    mFifoBuffer = new(std::nothrow) FifoBuffer(bytesPerFrame, capacityInFrames,
+                                 readCounterAddress, writeCounterAddress, dataAddress);
+    return (mFifoBuffer == nullptr) ? AAUDIO_ERROR_NO_MEMORY : AAUDIO_OK;
+}
+
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+                    RingBufferParcelable &ringBufferParcelable) {
+    int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+    ringBufferParcelable.setupMemory(fdIndex,
+                                     SHARED_RINGBUFFER_DATA_OFFSET,
+                                     mDataMemorySizeInBytes,
+                                     SHARED_RINGBUFFER_READ_OFFSET,
+                                     SHARED_RINGBUFFER_WRITE_OFFSET,
+                                     sizeof(fifo_counter_t));
+    ringBufferParcelable.setBytesPerFrame(mFifoBuffer->getBytesPerFrame());
+    ringBufferParcelable.setFramesPerBurst(1);
+    ringBufferParcelable.setCapacityInFrames(mCapacityInFrames);
+}
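As a rough usage sketch (the frame size and capacity below are arbitrary, and in the real service the SharedRingBuffer would be a long-lived member of the stream object), a service-side stream allocates the shared FIFO and then describes it to the client through the parcelables:

#include "SharedRingBuffer.h"

// Illustrative only; the caller owns all three objects.
aaudio_result_t exampleSetupFifo(aaudio::SharedRingBuffer &shared,
                                 aaudio::AudioEndpointParcelable &endpoint,
                                 aaudio::RingBufferParcelable &ringBuffer) {
    aaudio_result_t result = shared.allocate(4 /*bytesPerFrame*/, 2048 /*capacityInFrames*/);
    if (result != AAUDIO_OK) {
        return result;
    }
    // Publish the ashmem fd plus the counter/data offsets so the client can mmap the same FIFO.
    shared.fillParcelable(endpoint, ringBuffer);
    return AAUDIO_OK;
}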
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
new file mode 100644
index 0000000..75f138b
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SHARED_RINGBUFFER_H
+#define AAUDIO_SHARED_RINGBUFFER_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include "fifo/FifoBuffer.h"
+#include "RingBufferParcelable.h"
+#include "AudioEndpointParcelable.h"
+
+namespace aaudio {
+
+// Determine the placement of the counters and data in shared memory.
+#define SHARED_RINGBUFFER_READ_OFFSET   0
+#define SHARED_RINGBUFFER_WRITE_OFFSET  sizeof(fifo_counter_t)
+#define SHARED_RINGBUFFER_DATA_OFFSET   (SHARED_RINGBUFFER_WRITE_OFFSET + sizeof(fifo_counter_t))
+
+/**
+ * Atomic FIFO that uses shared memory.
+ */
+class SharedRingBuffer {
+public:
+    SharedRingBuffer() {}
+
+    virtual ~SharedRingBuffer();
+
+    aaudio_result_t allocate(fifo_frames_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+                        RingBufferParcelable &ringBufferParcelable);
+
+    FifoBuffer * getFifoBuffer() {
+        return mFifoBuffer;
+    }
+
+private:
+    int            mFileDescriptor = -1;
+    FifoBuffer   * mFifoBuffer = nullptr;
+    uint8_t      * mSharedMemory = nullptr;
+    int32_t        mSharedMemorySizeInBytes = 0;
+    int32_t        mDataMemorySizeInBytes = 0;
+    fifo_frames_t  mCapacityInFrames = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SHARED_RINGBUFFER_H
diff --git a/services/oboeservice/TimestampScheduler.cpp b/services/oboeservice/TimestampScheduler.cpp
new file mode 100644
index 0000000..5875909
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// for random()
+#include <stdlib.h>
+
+#include "TimestampScheduler.h"
+
+using namespace aaudio;
+
+void TimestampScheduler::start(aaudio_nanoseconds_t startTime) {
+    mStartTime = startTime;
+    mLastTime = startTime;
+}
+
+aaudio_nanoseconds_t TimestampScheduler::nextAbsoluteTime() {
+    int64_t periodsElapsed = (mLastTime - mStartTime) / mBurstPeriod;
+    // This is an arbitrary schedule that could probably be improved.
+    // It starts out sending a timestamp on every period because we want to
+    // get an accurate picture when the stream starts. Then it slows down
+    // to the occasional timestamps needed to detect a slow drift.
+    int64_t minPeriodsToDelay = (periodsElapsed < 10) ? 1 :
+        (periodsElapsed < 100) ? 3 :
+        (periodsElapsed < 1000) ? 10 : 50;
+    aaudio_nanoseconds_t sleepTime = minPeriodsToDelay * mBurstPeriod;
+    // Add a random offset, uniformly distributed over one burst period, so that
+    // successive wakeups sample the MMAP pointer at uncorrelated phases.
+    sleepTime += (aaudio_nanoseconds_t)(random() * mBurstPeriod / RAND_MAX);
+    mLastTime += sleepTime;
+    return mLastTime;
+}
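A sketch of how this scheduler could drive a timestamp polling loop; AudioClock::getNanoseconds() and AudioClock::sleepUntilNanoTime() are assumed to be the existing liboboe utility helpers, and keepPolling is a placeholder flag:

#include "AudioClock.h"
#include "TimestampScheduler.h"

// Illustrative polling loop under the assumptions stated above.
void examplePollTimestamps(aaudio::TimestampScheduler &scheduler, volatile bool &keepPolling) {
    scheduler.setBurstPeriod(192 /*framesPerBurst*/, 48000 /*sampleRate*/); // 4 ms per burst
    scheduler.start(aaudio::AudioClock::getNanoseconds());
    while (keepPolling) {
        aaudio_nanoseconds_t wakeTime = scheduler.nextAbsoluteTime();
        aaudio::AudioClock::sleepUntilNanoTime(wakeTime); // wake on the randomized back-off schedule
        // Read the MMAP hardware pointer here and send a timestamp to the client.
    }
}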
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
new file mode 100644
index 0000000..efc9c5f
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
+#define AAUDIO_TIMESTAMP_SCHEDULER_H
+
+//#include <stdlib.h> // random()
+
+#include "IAAudioService.h"
+#include "AAudioServiceDefinitions.h"
+#include "AudioStream.h"
+#include "fifo/FifoBuffer.h"
+#include "SharedRingBuffer.h"
+#include "AudioEndpointParcelable.h"
+
+namespace aaudio {
+
+/**
+ * Schedule wakeup time for monitoring the position
+ * of an MMAP/NOIRQ buffer.
+ *
+ * Note that this object is not thread safe. Only call it from a single thread.
+ */
+class TimestampScheduler
+{
+public:
+    TimestampScheduler() {};
+    virtual ~TimestampScheduler() = default;
+
+    /**
+     * Start the schedule at the given time.
+     */
+    void start(aaudio_nanoseconds_t startTime);
+
+    /**
+     * Calculate the next time that the read position should be
+     * measured.
+     */
+    aaudio_nanoseconds_t nextAbsoluteTime();
+
+    void setBurstPeriod(aaudio_nanoseconds_t burstPeriod) {
+        mBurstPeriod = burstPeriod;
+    }
+
+    void setBurstPeriod(aaudio_size_frames_t framesPerBurst,
+                        aaudio_sample_rate_t sampleRate) {
+        mBurstPeriod = AAUDIO_NANOS_PER_SECOND * framesPerBurst / sampleRate;
+    }
+
+    aaudio_nanoseconds_t getBurstPeriod() {
+        return mBurstPeriod;
+    }
+
+private:
+    // Start with an arbitrary default so we do not divide by zero.
+    aaudio_nanoseconds_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
+    aaudio_nanoseconds_t mStartTime;
+    aaudio_nanoseconds_t mLastTime;
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_TIMESTAMP_SCHEDULER_H */
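For example, with the frames/rate overload above, 192 frames per burst at 48000 Hz gives a burst period of 192 * AAUDIO_NANOS_PER_SECOND / 48000 = 4,000,000 ns (4 ms); nextAbsoluteTime() then waits 1, 3, 10 or 50 of these periods, plus a random fraction of one burst, depending on how long the stream has been running.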
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 4344506..1b50dc3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -30,9 +30,13 @@
     libradio \
     libradio_metadata
 
-ifeq ($(ENABLE_TREBLE),true)
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES +=               \
+    RadioHalLegacy.cpp
+else
 # Treble configuration
-LOCAL_CFLAGS += -DENABLE_TREBLE
+
 LOCAL_SRC_FILES += \
     HidlUtils.cpp \
     RadioHalHidl.cpp
@@ -42,14 +46,10 @@
     libhidlbase \
     libhidltransport \
     libbase \
+    libaudiohal \
     android.hardware.broadcastradio@1.0
-else
-# libhardware configuration
-LOCAL_SRC_FILES +=               \
-    RadioHalLegacy.cpp
 endif
 
-
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
index 34a6db7..032d3fd 100644
--- a/services/radio/RadioHalHidl.cpp
+++ b/services/radio/RadioHalHidl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "RadioHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 #include <utils/misc.h>
 #include <system/radio_metadata.h>
@@ -333,11 +334,27 @@
 RadioHalHidl::Tuner::Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module)
     : TunerInterface(), mHalTuner(NULL), mCallback(callback), mParentModule(module)
 {
+    // Make sure the handler we are passing in only deals with const members,
+    // as it can be called on an arbitrary thread.
+    const Tuner* self = this;
+    HalDeathHandler::getInstance()->registerAtExitHandler(
+            this, [self]() { self->sendHwFailureEvent(); });
 }
 
 
 RadioHalHidl::Tuner::~Tuner()
 {
+    HalDeathHandler::getInstance()->unregisterAtExitHandler(this);
+}
+
+void RadioHalHidl::Tuner::setHalTuner(sp<ITuner>& halTuner) {
+    if (mHalTuner != 0) {
+        mHalTuner->unlinkToDeath(HalDeathHandler::getInstance());
+    }
+    mHalTuner = halTuner;
+    if (mHalTuner != 0) {
+        mHalTuner->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
 }
 
 void RadioHalHidl::Tuner::handleHwFailure()
@@ -347,14 +364,19 @@
     if (parentModule != 0) {
         parentModule->clearService();
     }
+    sendHwFailureEvent();
+    mHalTuner.clear();
+}
+
+void RadioHalHidl::Tuner::sendHwFailureEvent() const
+{
     radio_hal_event_t event;
     memset(&event, 0, sizeof(radio_hal_event_t));
     event.type = RADIO_EVENT_HW_FAILURE;
     onCallback(&event);
-    mHalTuner.clear();
 }
 
-void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent)
+void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent) const
 {
     if (mCallback != 0) {
         mCallback->onEvent(halEvent);
diff --git a/services/radio/RadioHalHidl.h b/services/radio/RadioHalHidl.h
index b60a95e..38e181a 100644
--- a/services/radio/RadioHalHidl.h
+++ b/services/radio/RadioHalHidl.h
@@ -78,17 +78,18 @@
             virtual Return<void> newMetadata(uint32_t channel, uint32_t subChannel,
                                          const ::android::hardware::hidl_vec<MetaData>& metadata);
 
-            void setHalTuner(sp<ITuner>& halTuner) { mHalTuner = halTuner; }
+            void setHalTuner(sp<ITuner>& halTuner);
             sp<ITuner> getHalTuner() { return mHalTuner; }
 
         private:
             virtual          ~Tuner();
 
-                    void     onCallback(radio_hal_event_t *halEvent);
+                    void     onCallback(radio_hal_event_t *halEvent) const;
                     void     handleHwFailure();
+                    void     sendHwFailureEvent() const;
 
             sp<ITuner> mHalTuner;
-            sp<TunerCallbackInterface>  mCallback;
+            const sp<TunerCallbackInterface> mCallback;
             wp<RadioHalHidl> mParentModule;
         };
 
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index e1e1fb1..3e7a7ce 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -34,23 +34,23 @@
     libserviceutility
 
 
-ifeq ($(ENABLE_TREBLE),true)
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES +=               \
+    SoundTriggerHalLegacy.cpp
+else
 # Treble configuration
-LOCAL_CFLAGS += -DENABLE_TREBLE
 LOCAL_SRC_FILES +=               \
     SoundTriggerHalHidl.cpp
 
 LOCAL_SHARED_LIBRARIES += \
-		libhwbinder \
-		libhidlbase \
-		libhidltransport \
-		libbase \
-		android.hardware.soundtrigger@2.0 \
-		android.hardware.audio.common@2.0
-else
-# libhardware configuration
-LOCAL_SRC_FILES +=               \
-    SoundTriggerHalLegacy.cpp
+    libhwbinder \
+    libhidlbase \
+    libhidltransport \
+    libbase \
+    libaudiohal \
+    android.hardware.soundtrigger@2.0 \
+    android.hardware.audio.common@2.0
 endif
 
 
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index eb9d38d..7cc8a2b 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "SoundTriggerHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 #include "SoundTriggerHalHidl.h"
 #include <hwbinder/IPCThreadState.h>
@@ -59,7 +60,7 @@
         }
     } else {
         ALOGE("getProperties error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     ALOGI("getProperties ret %d", ret);
     return ret;
@@ -132,7 +133,7 @@
         }
     } else {
         ALOGE("loadSoundModel error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
 
     return ret;
@@ -159,7 +160,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("unloadSoundModel error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
 
     return hidlReturn;
@@ -197,7 +198,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("startRecognition error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -223,7 +224,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("stopRecognition error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -243,7 +244,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("stopAllRecognitions error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -267,6 +268,9 @@
         std::string serviceName = "sound_trigger.";
         serviceName.append(mModuleName);
         mISoundTrigger = ISoundTriggerHw::getService(serviceName);
+        if (mISoundTrigger != 0) {
+            mISoundTrigger->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+        }
     }
     return mISoundTrigger;
 }