Adds emulated QEMU camera for HAL3.

Only enables preview frames to be captured.

Also refactors EmulatedCameraFactory and makes its style more consistent
with newer source files.

Test: TestingCamera2.1 app
Bug: 64128022
Change-Id: Ife5c70d2ef065c9c9397f28ccefc0ca40e3afa23
diff --git a/camera/Android.mk b/camera/Android.mk
index f24acb1..abb3778 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -72,6 +72,8 @@
 		fake-pipeline2/JpegCompressor.cpp \
 	EmulatedCamera3.cpp \
 		EmulatedFakeCamera3.cpp \
+		EmulatedQemuCamera3.cpp \
+		qemu-pipeline3/QemuSensor.cpp \
 	Exif.cpp \
 	Thumbnail.cpp \
 	WorkerThread.cpp \
diff --git a/camera/EmulatedCameraFactory.cpp b/camera/EmulatedCameraFactory.cpp
index cf8440a..fc97199 100755
--- a/camera/EmulatedCameraFactory.cpp
+++ b/camera/EmulatedCameraFactory.cpp
@@ -21,160 +21,85 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_Factory"
-#include <cutils/log.h>
-#include <cutils/properties.h>
-#include "EmulatedQemuCamera.h"
+
+#include "EmulatedCameraFactory.h"
+#include "EmulatedCameraHotplugThread.h"
 #include "EmulatedFakeCamera.h"
 #include "EmulatedFakeCamera2.h"
 #include "EmulatedFakeCamera3.h"
-#include "EmulatedCameraHotplugThread.h"
-#include "EmulatedCameraFactory.h"
+#include "EmulatedQemuCamera.h"
+#include "EmulatedQemuCamera3.h"
+
+#include <cutils/log.h>
+#include <cutils/properties.h>
 
 extern camera_module_t HAL_MODULE_INFO_SYM;
 
-/* A global instance of EmulatedCameraFactory is statically instantiated and
+/*
+ * A global instance of EmulatedCameraFactory is statically instantiated and
  * initialized when camera emulation HAL is loaded.
  */
-android::EmulatedCameraFactory  gEmulatedCameraFactory;
+android::EmulatedCameraFactory gEmulatedCameraFactory;
 
 namespace android {
 
-EmulatedCameraFactory::EmulatedCameraFactory()
-        : mQemuClient(),
-          mEmulatedCameras(NULL),
-          mEmulatedCameraNum(0),
-          mFakeCameraNum(0),
-          mConstructedOK(false),
-          mCallbacks(NULL)
-{
-    status_t res;
-    /* Connect to the factory service in the emulator, and create Qemu cameras. */
-    if (mQemuClient.connectClient(NULL) == NO_ERROR) {
-        /* Connection has succeeded. Create emulated cameras for each camera
-         * device, reported by the service. */
-        createQemuCameras();
+EmulatedCameraFactory::EmulatedCameraFactory() :
+        mQemuClient(),
+        mEmulatedCameras(nullptr),
+        mEmulatedCameraNum(0),
+        mFakeCameraNum(0),
+        mConstructedOK(false),
+        mCallbacks(nullptr) {
+
+    /*
+     * Figure out how many cameras need to be created, so we can allocate the
+     * array of emulated cameras before populating it.
+     */
+    int emulatedCamerasSize = 0;
+
+    // QEMU Cameras
+    std::vector<QemuCameraInfo> qemuCameras;
+    if (mQemuClient.connectClient(nullptr) == NO_ERROR) {
+        findQemuCameras(&qemuCameras);
+        emulatedCamerasSize += qemuCameras.size();
     }
 
+    // Fake Cameras
+    if (isFakeCameraEmulationOn(/* backCamera */ true)) {
+        mFakeCameraNum++;
+    }
+    if (isFakeCameraEmulationOn(/* backCamera */ false)) {
+        mFakeCameraNum++;
+    }
+    emulatedCamerasSize += mFakeCameraNum;
+
+    /*
+     * We have the number of cameras we need to create, now allocate space for
+     * them.
+     */
+    mEmulatedCameras = new EmulatedBaseCamera*[emulatedCamerasSize];
+    if (mEmulatedCameras == nullptr) {
+        ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+                __FUNCTION__, emulatedCamerasSize);
+        return;
+    }
+
+    createQemuCameras(qemuCameras);
+
     waitForQemuSfFakeCameraPropertyAvailable();
 
-    if (isBackFakeCameraEmulationOn()) {
-        /* Camera ID. */
-        const int camera_id = mEmulatedCameraNum;
-        /* Use fake camera to emulate back-facing camera. */
-        mEmulatedCameraNum++;
-
-        /* Make sure that array is allocated (in case there were no 'qemu'
-         * cameras created. Note that we preallocate the array so it may contain
-         * two fake cameras: one facing back, and another facing front. */
-        if (mEmulatedCameras == NULL) {
-            mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum + 1];
-            if (mEmulatedCameras == NULL) {
-                ALOGE("%s: Unable to allocate emulated camera array for %d entries",
-                     __FUNCTION__, mEmulatedCameraNum);
-                return;
-            }
-            memset(mEmulatedCameras, 0,
-                    (mEmulatedCameraNum + 1) * sizeof(EmulatedBaseCamera*));
-        }
-
-        /* Create, and initialize the fake camera */
-        switch (getBackCameraHalVersion()) {
-            case 1:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera(camera_id, true,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            case 2:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera2(camera_id, true,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            case 3:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera3(camera_id, true,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            default:
-                ALOGE("%s: Unknown back camera hal version requested: %d", __FUNCTION__,
-                        getBackCameraHalVersion());
-        }
-        if (mEmulatedCameras[camera_id] != NULL) {
-            ALOGV("%s: Back camera device version is %d", __FUNCTION__,
-                    getBackCameraHalVersion());
-            res = mEmulatedCameras[camera_id]->Initialize();
-            if (res != NO_ERROR) {
-                ALOGE("%s: Unable to intialize back camera %d: %s (%d)",
-                        __FUNCTION__, camera_id, strerror(-res), res);
-                delete mEmulatedCameras[camera_id];
-                mEmulatedCameraNum--;
-            }
-        } else {
-            mEmulatedCameraNum--;
-            ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
-        }
+    // Create fake cameras, if enabled.
+    if (isFakeCameraEmulationOn(/* backCamera */ true)) {
+        createFakeCamera(/* backCamera */ true);
     }
-
-    if (isFrontFakeCameraEmulationOn()) {
-        /* Camera ID. */
-        const int camera_id = mEmulatedCameraNum;
-        /* Use fake camera to emulate front-facing camera. */
-        mEmulatedCameraNum++;
-
-        /* Make sure that array is allocated (in case there were no 'qemu'
-         * cameras created. */
-        if (mEmulatedCameras == NULL) {
-            mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum];
-            if (mEmulatedCameras == NULL) {
-                ALOGE("%s: Unable to allocate emulated camera array for %d entries",
-                     __FUNCTION__, mEmulatedCameraNum);
-                return;
-            }
-            memset(mEmulatedCameras, 0,
-                    mEmulatedCameraNum * sizeof(EmulatedBaseCamera*));
-        }
-
-        /* Create, and initialize the fake camera */
-        switch (getFrontCameraHalVersion()) {
-            case 1:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera(camera_id, false,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            case 2:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera2(camera_id, false,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            case 3:
-                mEmulatedCameras[camera_id] =
-                        new EmulatedFakeCamera3(camera_id, false,
-                                &HAL_MODULE_INFO_SYM.common);
-                break;
-            default:
-                ALOGE("%s: Unknown front camera hal version requested: %d",
-                        __FUNCTION__,
-                        getFrontCameraHalVersion());
-        }
-        if (mEmulatedCameras[camera_id] != NULL) {
-            ALOGV("%s: Front camera device version is %d", __FUNCTION__,
-                    getFrontCameraHalVersion());
-            res = mEmulatedCameras[camera_id]->Initialize();
-            if (res != NO_ERROR) {
-                ALOGE("%s: Unable to intialize front camera %d: %s (%d)",
-                        __FUNCTION__, camera_id, strerror(-res), res);
-                delete mEmulatedCameras[camera_id];
-                mEmulatedCameraNum--;
-            }
-        } else {
-            mEmulatedCameraNum--;
-            ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
-        }
+    if (isFakeCameraEmulationOn(/* backCamera */ false)) {
+        createFakeCamera(/* backCamera */ false);
     }
 
     ALOGE("%d cameras are being emulated. %d of them are fake cameras.",
-          mEmulatedCameraNum, mFakeCameraNum);
+            mEmulatedCameraNum, mFakeCameraNum);
 
-    /* Create hotplug thread */
+    // Create hotplug thread.
     {
         Vector<int> cameraIdVector;
         for (int i = 0; i < mEmulatedCameraNum; ++i) {
@@ -188,72 +113,72 @@
     mConstructedOK = true;
 }
 
-EmulatedCameraFactory::~EmulatedCameraFactory()
-{
-    if (mEmulatedCameras != NULL) {
+EmulatedCameraFactory::~EmulatedCameraFactory() {
+    if (mEmulatedCameras != nullptr) {
         for (int n = 0; n < mEmulatedCameraNum; n++) {
-            if (mEmulatedCameras[n] != NULL) {
+            if (mEmulatedCameras[n] != nullptr) {
                 delete mEmulatedCameras[n];
             }
         }
         delete[] mEmulatedCameras;
     }
 
-    if (mHotplugThread != NULL) {
+    if (mHotplugThread != nullptr) {
         mHotplugThread->requestExit();
         mHotplugThread->join();
     }
 }
 
-/****************************************************************************
+/******************************************************************************
  * Camera HAL API handlers.
  *
  * Each handler simply verifies existence of an appropriate EmulatedBaseCamera
  * instance, and dispatches the call to that instance.
  *
- ***************************************************************************/
+ *****************************************************************************/
 
-int EmulatedCameraFactory::cameraDeviceOpen(int camera_id, hw_device_t** device)
-{
-    ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+int EmulatedCameraFactory::cameraDeviceOpen(int cameraId,
+                                            hw_device_t **device) {
+    ALOGV("%s: id = %d", __FUNCTION__, cameraId);
 
-    *device = NULL;
+    *device = nullptr;
 
     if (!isConstructedOK()) {
-        ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+        ALOGE("%s: EmulatedCameraFactory has failed to initialize",
+                __FUNCTION__);
         return -EINVAL;
     }
 
-    if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+    if (cameraId < 0 || cameraId >= getEmulatedCameraNum()) {
         ALOGE("%s: Camera id %d is out of bounds (%d)",
-             __FUNCTION__, camera_id, getEmulatedCameraNum());
+                __FUNCTION__, cameraId, getEmulatedCameraNum());
         return -ENODEV;
     }
 
-    return mEmulatedCameras[camera_id]->connectCamera(device);
+    return mEmulatedCameras[cameraId]->connectCamera(device);
 }
 
-int EmulatedCameraFactory::getCameraInfo(int camera_id, struct camera_info* info)
-{
-    ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+int EmulatedCameraFactory::getCameraInfo(int cameraId,
+                                         struct camera_info *info) {
+    ALOGV("%s: id = %d", __FUNCTION__, cameraId);
 
     if (!isConstructedOK()) {
-        ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+        ALOGE("%s: EmulatedCameraFactory has failed to initialize",
+                __FUNCTION__);
         return -EINVAL;
     }
 
-    if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+    if (cameraId < 0 || cameraId >= getEmulatedCameraNum()) {
         ALOGE("%s: Camera id %d is out of bounds (%d)",
-             __FUNCTION__, camera_id, getEmulatedCameraNum());
+                __FUNCTION__, cameraId, getEmulatedCameraNum());
         return -ENODEV;
     }
 
-    return mEmulatedCameras[camera_id]->getCameraInfo(info);
+    return mEmulatedCameras[cameraId]->getCameraInfo(info);
 }
 
 int EmulatedCameraFactory::setCallbacks(
-        const camera_module_callbacks_t *callbacks)
-{
+        const camera_module_callbacks_t *callbacks) {
     ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
 
     mCallbacks = callbacks;
@@ -263,18 +188,15 @@
 
 void EmulatedCameraFactory::getVendorTagOps(vendor_tag_ops_t* ops) {
     ALOGV("%s: ops = %p", __FUNCTION__, ops);
-
-    // No vendor tags defined for emulator yet, so not touching ops
+    // No vendor tags defined for emulator yet, so not touching ops.
 }
 
 /****************************************************************************
  * Camera HAL API callbacks.
  ***************************************************************************/
 
-int EmulatedCameraFactory::device_open(const hw_module_t* module,
-                                       const char* name,
-                                       hw_device_t** device)
-{
+int EmulatedCameraFactory::device_open(const hw_module_t *module, const char
+        *name, hw_device_t **device) {
     /*
      * Simply verify the parameters, and dispatch the call inside the
      * EmulatedCameraFactory instance.
@@ -282,10 +204,10 @@
 
     if (module != &HAL_MODULE_INFO_SYM.common) {
         ALOGE("%s: Invalid module %p expected %p",
-             __FUNCTION__, module, &HAL_MODULE_INFO_SYM.common);
+                __FUNCTION__, module, &HAL_MODULE_INFO_SYM.common);
         return -EINVAL;
     }
-    if (name == NULL) {
+    if (name == nullptr) {
         ALOGE("%s: NULL name is not expected here", __FUNCTION__);
         return -EINVAL;
     }
@@ -293,31 +215,27 @@
     return gEmulatedCameraFactory.cameraDeviceOpen(atoi(name), device);
 }
 
-int EmulatedCameraFactory::get_number_of_cameras(void)
-{
+int EmulatedCameraFactory::get_number_of_cameras() {
     return gEmulatedCameraFactory.getEmulatedCameraNum();
 }
 
 int EmulatedCameraFactory::get_camera_info(int camera_id,
-                                           struct camera_info* info)
-{
+                        struct camera_info *info) {
     return gEmulatedCameraFactory.getCameraInfo(camera_id, info);
 }
 
 int EmulatedCameraFactory::set_callbacks(
-        const camera_module_callbacks_t *callbacks)
-{
+        const camera_module_callbacks_t *callbacks) {
     return gEmulatedCameraFactory.setCallbacks(callbacks);
 }
 
-void EmulatedCameraFactory::get_vendor_tag_ops(vendor_tag_ops_t* ops)
-{
+void EmulatedCameraFactory::get_vendor_tag_ops(vendor_tag_ops_t *ops) {
     gEmulatedCameraFactory.getVendorTagOps(ops);
 }
 
-int EmulatedCameraFactory::open_legacy(const struct hw_module_t* module,
-        const char* id, uint32_t halVersion, struct hw_device_t** device) {
-    // Not supporting legacy open
+int EmulatedCameraFactory::open_legacy(const struct hw_module_t *module,
+        const char *id, uint32_t halVersion, struct hw_device_t **device) {
+    // Not supporting legacy open.
     return -ENOSYS;
 }
 
@@ -329,24 +247,57 @@
  * Camera information tokens passed in response to the "list" factory query.
  */
 
-/* Device name token. */
-static const char lListNameToken[]    = "name=";
-/* Frame dimensions token. */
-static const char lListDimsToken[]    = "framedims=";
-/* Facing direction token. */
-static const char lListDirToken[]     = "dir=";
+// Device name token.
+static const char *kListNameToken = "name=";
+// Frame dimensions token.
+static const char *kListDimsToken = "framedims=";
+// Facing direction token.
+static const char *kListDirToken = "dir=";
 
-void EmulatedCameraFactory::createQemuCameras()
-{
-    /* Obtain camera list. */
-    char* camera_list = NULL;
-    status_t res = mQemuClient.listCameras(&camera_list);
-    /* Empty list, or list containing just an EOL means that there were no
-     * connected cameras found. */
-    if (res != NO_ERROR || camera_list == NULL || *camera_list == '\0' ||
-        *camera_list == '\n') {
-        if (camera_list != NULL) {
-            free(camera_list);
+
+bool EmulatedCameraFactory::getTokenValue(const char *token,
+        const std::string &s, char **value) {
+    // Find the start of the token.
+    size_t tokenStart = s.find(token);
+    if (tokenStart == std::string::npos) {
+        return false;
+    }
+
+    // Advance to the beginning of the token value.
+    size_t valueStart = tokenStart + strlen(token);
+
+    // Find the length of the token value.
+    size_t valueLength = s.find(' ', valueStart) - valueStart;
+
+    // Extract the value substring.
+    std::string valueStr = s.substr(valueStart, valueLength);
+
+    // Convert to char*.
+    *value = new char[valueStr.length() + 1];
+    if (*value == nullptr) {
+        return false;
+    }
+    strcpy(*value, valueStr.c_str());
+
+    ALOGV("%s: Parsed value is \"%s\"", __FUNCTION__, *value);
+
+    return true;
+}
+
+void EmulatedCameraFactory::findQemuCameras(
+        std::vector<QemuCameraInfo> *qemuCameras) {
+    // Obtain camera list.
+    char *cameraList = nullptr;
+    status_t res = mQemuClient.listCameras(&cameraList);
+
+    /*
+     * Empty list, or list containing just an EOL means that there were no
+     * connected cameras found.
+     */
+    if (res != NO_ERROR || cameraList == nullptr || *cameraList == '\0' ||
+        *cameraList == '\n') {
+        if (cameraList != nullptr) {
+            free(cameraList);
         }
         return;
     }
@@ -356,99 +307,186 @@
      * is the number of the connected cameras.
      */
 
-    int num = 0;
-    const char* eol = strchr(camera_list, '\n');
-    while (eol != NULL) {
-        num++;
-        eol = strchr(eol + 1, '\n');
-    }
+    std::string cameraListStr(cameraList);
+    free(cameraList);
 
-    /* Allocate the array for emulated camera instances. Note that we allocate
-     * two more entries for back and front fake camera emulation. */
-    mEmulatedCameras = new EmulatedBaseCamera*[num + 2];
-    if (mEmulatedCameras == NULL) {
-        ALOGE("%s: Unable to allocate emulated camera array for %d entries",
-             __FUNCTION__, num + 1);
-        free(camera_list);
-        return;
-    }
-    memset(mEmulatedCameras, 0, sizeof(EmulatedBaseCamera*) * (num + 1));
+    size_t lineBegin = 0;
+    size_t lineEnd = cameraListStr.find('\n');
+    while (lineEnd != std::string::npos) {
+        std::string cameraStr = cameraListStr.substr(lineBegin, lineEnd - lineBegin);
 
+        // Parse the 'name', 'framedims', and 'dir' tokens.
+        char *name, *frameDims, *dir;
+        if (getTokenValue(kListNameToken, cameraStr, &name) &&
+                getTokenValue(kListDimsToken, cameraStr, &frameDims) &&
+                getTokenValue(kListDirToken, cameraStr, &dir)) {
+            // Push the camera info if it was all successfully parsed.
+            qemuCameras->push_back(QemuCameraInfo{
+                .name = name,
+                .frameDims = frameDims,
+                .dir = dir,
+            });
+        } else {
+            ALOGW("%s: Bad camera information: %s", __FUNCTION__,
+                    cameraStr.c_str());
+        }
+        // Skip over the newline for the beginning of the next line.
+        lineBegin = lineEnd + 1;
+        lineEnd = cameraListStr.find('\n', lineBegin);
+    }
+}
+
+void EmulatedCameraFactory::createQemuCameras(
+        const std::vector<QemuCameraInfo> &qemuCameras) {
     /*
-     * Iterate the list, creating, and initializin emulated qemu cameras for each
-     * entry (line) in the list.
+     * Iterate the list, creating, and initializing emulated QEMU cameras for each
+     * entry in the list.
      */
 
-    int index = 0;
-    char* cur_entry = camera_list;
-    while (cur_entry != NULL && *cur_entry != '\0' && index < num) {
-        /* Find the end of the current camera entry, and terminate it with zero
-         * for simpler string manipulation. */
-        char* next_entry = strchr(cur_entry, '\n');
-        if (next_entry != NULL) {
-            *next_entry = '\0';
-            next_entry++;   // Start of the next entry.
-        }
-
-        /* Find 'name', 'framedims', and 'dir' tokens that are required here. */
-        char* name_start = strstr(cur_entry, lListNameToken);
-        char* dim_start = strstr(cur_entry, lListDimsToken);
-        char* dir_start = strstr(cur_entry, lListDirToken);
-        if (name_start != NULL && dim_start != NULL && dir_start != NULL) {
-            /* Advance to the token values. */
-            name_start += strlen(lListNameToken);
-            dim_start += strlen(lListDimsToken);
-            dir_start += strlen(lListDirToken);
-
-            /* Terminate token values with zero. */
-            char* s = strchr(name_start, ' ');
-            if (s != NULL) {
-                *s = '\0';
-            }
-            s = strchr(dim_start, ' ');
-            if (s != NULL) {
-                *s = '\0';
-            }
-            s = strchr(dir_start, ' ');
-            if (s != NULL) {
-                *s = '\0';
-            }
-
-            /* Create and initialize qemu camera. */
-            EmulatedQemuCamera* qemu_cam =
-                new EmulatedQemuCamera(index, &HAL_MODULE_INFO_SYM.common);
-            if (NULL != qemu_cam) {
-                res = qemu_cam->Initialize(name_start, dim_start, dir_start);
-                if (res == NO_ERROR) {
-                    mEmulatedCameras[index] = qemu_cam;
-                    index++;
-                } else {
-                    delete qemu_cam;
-                }
-            } else {
-                ALOGE("%s: Unable to instantiate EmulatedQemuCamera",
-                     __FUNCTION__);
-            }
+    /*
+     * We use this index only for determining which direction the webcam should
+     * face. Otherwise, mEmulatedCameraNum represents the camera ID and the
+     * index into mEmulatedCameras.
+     */
+    int qemuIndex = 0;
+    for (const auto &cameraInfo : qemuCameras) {
+        /*
+         * Here, we're assuming the first webcam is intended to be the back
+         * camera and any other webcams are front cameras.
+         */
+        int halVersion = 0;
+        if (qemuIndex == 0) {
+            halVersion = getCameraHalVersion(/* backCamera */ true);
         } else {
-            ALOGW("%s: Bad camera information: %s", __FUNCTION__, cur_entry);
+            halVersion = getCameraHalVersion(/* backCamera */ false);
         }
 
-        cur_entry = next_entry;
+        // Create and initialize QEMU camera.
+        EmulatedBaseCamera *qemuCam = nullptr;
+        status_t res;
+        switch (halVersion) {
+            case 1:
+                EmulatedQemuCamera *qemuCamOne;
+                qemuCamOne = new EmulatedQemuCamera(
+                        mEmulatedCameraNum, &HAL_MODULE_INFO_SYM.common);
+                if (qemuCamOne == nullptr) {
+                    ALOGE("%s: Unable to instantiate EmulatedQemuCamera",
+                            __FUNCTION__);
+                } else {
+                    /*
+                     * We have to initialize in each switch case, because
+                     * EmulatedBaseCamera::Initialize has a different method
+                     * signature.
+                     *
+                     * TODO: Having an EmulatedBaseQemuCamera class
+                     * could fix this issue.
+                     */
+                    res = qemuCamOne->Initialize(
+                            cameraInfo.name,
+                            cameraInfo.frameDims,
+                            cameraInfo.dir);
+                }
+                qemuCam = qemuCamOne;
+                break;
+            case 2:
+                ALOGE("%s: QEMU support for camera hal version %d is not "
+                        "implemented", __FUNCTION__, halVersion);
+                break;
+            case 3:
+                EmulatedQemuCamera3 *qemuCamThree;
+                qemuCamThree = new EmulatedQemuCamera3(
+                        mEmulatedCameraNum, &HAL_MODULE_INFO_SYM.common);
+                if (qemuCamThree == nullptr) {
+                    ALOGE("%s: Unable to instantiate EmulatedQemuCamera3",
+                            __FUNCTION__);
+                } else {
+                    res = qemuCamThree->Initialize(
+                            cameraInfo.name,
+                            cameraInfo.frameDims,
+                            cameraInfo.dir);
+                }
+                qemuCam = qemuCamThree;
+                break;
+            default:
+                ALOGE("%s: Unknown camera hal version requested: %d",
+                        __FUNCTION__, halVersion);
+        }
+
+        if (qemuCam == nullptr) {
+            ALOGE("%s: Unable to instantiate EmulatedQemuCamera",
+                    __FUNCTION__);
+        } else {
+            if (res == NO_ERROR) {
+                mEmulatedCameras[mEmulatedCameraNum] = qemuCam;
+                qemuIndex++;
+                mEmulatedCameraNum++;
+            } else {
+                delete qemuCam;
+            }
+        }
+    }
+}
+
+void EmulatedCameraFactory::createFakeCamera(bool backCamera) {
+    int halVersion = getCameraHalVersion(backCamera);
+
+    /*
+     * Create and initialize the fake camera, using the index into
+     * mEmulatedCameras as the camera ID.
+     */
+    switch (halVersion) {
+        case 1:
+            mEmulatedCameras[mEmulatedCameraNum] =
+                    new EmulatedFakeCamera(mEmulatedCameraNum, backCamera,
+                            &HAL_MODULE_INFO_SYM.common);
+            break;
+        case 2:
+            mEmulatedCameras[mEmulatedCameraNum] =
+                    new EmulatedFakeCamera2(mEmulatedCameraNum, backCamera,
+                            &HAL_MODULE_INFO_SYM.common);
+            break;
+        case 3:
+            mEmulatedCameras[mEmulatedCameraNum] =
+                    new EmulatedFakeCamera3(mEmulatedCameraNum, backCamera,
+                            &HAL_MODULE_INFO_SYM.common);
+            break;
+        default:
+            ALOGE("%s: Unknown %s camera hal version requested: %d",
+                    __FUNCTION__, backCamera ? "back" : "front", halVersion);
     }
 
-    mEmulatedCameraNum = index;
+    if (mEmulatedCameras[mEmulatedCameraNum] == nullptr) {
+        ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
+    } else {
+        ALOGV("%s: %s camera device version is %d", __FUNCTION__,
+                backCamera ? "Back" : "Front", halVersion);
+        status_t res = mEmulatedCameras[mEmulatedCameraNum]->Initialize();
+        if (res == NO_ERROR) {
+            // Camera creation and initialization was successful.
+            mEmulatedCameraNum++;
+        } else {
+            ALOGE("%s: Unable to initialize %s camera %d: %s (%d)",
+                    __FUNCTION__, backCamera ? "back" : "front",
+                    mEmulatedCameraNum, strerror(-res), res);
+            delete mEmulatedCameras[mEmulatedCameraNum];
+        }
+    }
 }
 
 void EmulatedCameraFactory::waitForQemuSfFakeCameraPropertyAvailable() {
-    // Camera service may start running before qemu-props sets qemu.sf.fake_camera to
-    // any of the follwing four values: "none,front,back,both"; so we need to wait.
-    // android/camera/camera-service.c
-    // bug: 30768229
+    /*
+     * Camera service may start running before qemu-props sets
+     * qemu.sf.fake_camera to any of the following four values:
+     * "none,front,back,both"; so we need to wait.
+     *
+     * android/camera/camera-service.c
+     * bug: 30768229
+     */
     int numAttempts = 100;
     char prop[PROPERTY_VALUE_MAX];
     bool timeout = true;
     for (int i = 0; i < numAttempts; ++i) {
-        if (property_get("qemu.sf.fake_camera", prop, NULL) != 0 ) {
+        if (property_get("qemu.sf.fake_camera", prop, nullptr) != 0 ) {
             timeout = false;
             break;
         }
@@ -459,68 +497,45 @@
     }
 }
 
-bool EmulatedCameraFactory::isBackFakeCameraEmulationOn()
-{
-    /* Defined by 'qemu.sf.fake_camera' boot property: if property exist, and
-     * is set to 'both', or 'back', then fake camera is used to emulate back
-     * camera. */
+bool EmulatedCameraFactory::isFakeCameraEmulationOn(bool backCamera) {
+    /*
+     * Defined by 'qemu.sf.fake_camera' boot property. If the property exists,
+     * and if it's set to 'both', then fake cameras are used to emulate both
+     * sides. If it's set to 'back' or 'front', then a fake camera is used only
+     * to emulate the back or front camera, respectively.
+     */
     char prop[PROPERTY_VALUE_MAX];
-    if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
-        (!strcmp(prop, "both") || !strcmp(prop, "back"))) {
+    if ((property_get("qemu.sf.fake_camera", prop, nullptr) > 0) &&
+        (!strcmp(prop, "both") ||
+         !strcmp(prop, backCamera ? "back" : "front"))) {
         return true;
     } else {
         return false;
     }
 }
 
-int EmulatedCameraFactory::getBackCameraHalVersion()
-{
-    /* Defined by 'qemu.sf.back_camera_hal_version' boot property: if the
-     * property doesn't exist, it is assumed to be 1. */
+int EmulatedCameraFactory::getCameraHalVersion(bool backCamera) {
+    /*
+     * Defined by 'qemu.sf.front_camera_hal_version' and
+     * 'qemu.sf.back_camera_hal_version' boot properties. If the property
+     * doesn't exist, it is assumed we are working with HAL v1.
+     */
     char prop[PROPERTY_VALUE_MAX];
-    if (property_get("qemu.sf.back_camera_hal", prop, NULL) > 0) {
-        char *prop_end = prop;
-        int val = strtol(prop, &prop_end, 10);
-        if (*prop_end == '\0') {
+    const char *propQuery = backCamera ?
+            "qemu.sf.back_camera_hal" :
+            "qemu.sf.front_camera_hal";
+    if (property_get(propQuery, prop, nullptr) > 0) {
+        char *propEnd = prop;
+        int val = strtol(prop, &propEnd, 10);
+        if (*propEnd == '\0') {
             return val;
         }
-        // Badly formatted property, should just be a number
+        // Badly formatted property. It should just be a number.
         ALOGE("qemu.sf.back_camera_hal is not a number: %s", prop);
     }
     return 1;
 }
 
-bool EmulatedCameraFactory::isFrontFakeCameraEmulationOn()
-{
-    /* Defined by 'qemu.sf.fake_camera' boot property: if property exist, and
-     * is set to 'both', or 'front', then fake camera is used to emulate front
-     * camera. */
-    char prop[PROPERTY_VALUE_MAX];
-    if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
-        (!strcmp(prop, "both") || !strcmp(prop, "front"))) {
-        return true;
-    } else {
-        return false;
-    }
-}
-
-int EmulatedCameraFactory::getFrontCameraHalVersion()
-{
-    /* Defined by 'qemu.sf.front_camera_hal_version' boot property: if the
-     * property doesn't exist, it is assumed to be 1. */
-    char prop[PROPERTY_VALUE_MAX];
-    if (property_get("qemu.sf.front_camera_hal", prop, NULL) > 0) {
-        char *prop_end = prop;
-        int val = strtol(prop, &prop_end, 10);
-        if (*prop_end == '\0') {
-            return val;
-        }
-        // Badly formatted property, should just be a number
-        ALOGE("qemu.sf.front_camera_hal is not a number: %s", prop);
-    }
-    return 1;
-}
-
 void EmulatedCameraFactory::onStatusChanged(int cameraId, int newStatus) {
 
     EmulatedBaseCamera *cam = mEmulatedCameras[cameraId];
@@ -529,7 +544,7 @@
         return;
     }
 
-    /**
+    /*
      * (Order is important)
      * Send the callback first to framework, THEN close the camera.
      */
@@ -540,7 +555,7 @@
     }
 
     const camera_module_callbacks_t* cb = mCallbacks;
-    if (cb != NULL && cb->camera_device_status_change != NULL) {
+    if (cb != nullptr && cb->camera_device_status_change != nullptr) {
         cb->camera_device_status_change(cb, cameraId, newStatus);
     }
 
@@ -549,16 +564,15 @@
     } else if (newStatus == CAMERA_DEVICE_STATUS_PRESENT) {
         cam->plugCamera();
     }
-
 }
 
 /********************************************************************************
  * Initializer for the static member structure.
  *******************************************************************************/
 
-/* Entry point for camera HAL API. */
+// Entry point for camera HAL API.
 struct hw_module_methods_t EmulatedCameraFactory::mCameraModuleMethods = {
     open: EmulatedCameraFactory::device_open
 };
 
-}; /* namespace android */
+}; // end of namespace android
diff --git a/camera/EmulatedCameraFactory.h b/camera/EmulatedCameraFactory.h
index 923fe7e..5e55c03 100755
--- a/camera/EmulatedCameraFactory.h
+++ b/camera/EmulatedCameraFactory.h
@@ -17,10 +17,12 @@
 #ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
 #define HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
 
-#include <utils/RefBase.h>
 #include "EmulatedBaseCamera.h"
 #include "QemuClient.h"
 
+#include <utils/RefBase.h>
+#include <vector>
+
 namespace android {
 
 struct EmulatedCameraHotplugThread;
@@ -31,7 +33,8 @@
  * instantiated and initialized when camera emulation HAL is loaded.
  */
 
-/* Class EmulatedCameraFactoryManages cameras available for the emulation.
+/*
+ * Class EmulatedCameraFactoryManages cameras available for the emulation.
  *
  * When the global static instance of this class is created on the module load,
  * it enumerates cameras available for the emulation by connecting to the
@@ -50,93 +53,123 @@
  */
 class EmulatedCameraFactory {
 public:
-    /* Constructs EmulatedCameraFactory instance.
+    /*
+     * Constructs EmulatedCameraFactory instance.
      * In this constructor the factory will create and initialize a list of
      * emulated cameras. All errors that occur on this constructor are reported
      * via mConstructedOK data member of this class.
      */
     EmulatedCameraFactory();
 
-    /* Destructs EmulatedCameraFactory instance. */
+    /*
+     * Destructs EmulatedCameraFactory instance.
+     */
     ~EmulatedCameraFactory();
 
+public:
     /****************************************************************************
      * Camera HAL API handlers.
      ***************************************************************************/
 
-public:
-    /* Opens (connects to) a camera device.
+    /*
+     * Opens (connects to) a camera device.
+     *
      * This method is called in response to hw_module_methods_t::open callback.
      */
-    int cameraDeviceOpen(int camera_id, hw_device_t** device);
+    int cameraDeviceOpen(int camera_id, hw_device_t **device);
 
-    /* Gets emulated camera information.
-     * This method is called in response to camera_module_t::get_camera_info callback.
+    /*
+     * Gets emulated camera information.
+     *
+     * This method is called in response to camera_module_t::get_camera_info
+     * callback.
      */
     int getCameraInfo(int camera_id, struct camera_info *info);
 
-    /* Sets emulated camera callbacks.
-     * This method is called in response to camera_module_t::set_callbacks callback.
+    /*
+     * Sets emulated camera callbacks.
+     *
+     * This method is called in response to camera_module_t::set_callbacks
+     * callback.
      */
     int setCallbacks(const camera_module_callbacks_t *callbacks);
 
-    /* Fill in vendor tags for the module
-     * This method is called in response to camera_module_t::get_vendor_tag_ops callback.
+    /*
+     * Fill in vendor tags for the module.
+     *
+     * This method is called in response to camera_module_t::get_vendor_tag_ops
+     * callback.
      */
-    void getVendorTagOps(vendor_tag_ops_t* ops);
+    void getVendorTagOps(vendor_tag_ops_t *ops);
 
+public:
     /****************************************************************************
      * Camera HAL API callbacks.
      ***************************************************************************/
 
-public:
-    /* camera_module_t::get_number_of_cameras callback entry point. */
+    /*
+     * camera_module_t::get_number_of_cameras callback entry point.
+     */
     static int get_number_of_cameras(void);
 
-    /* camera_module_t::get_camera_info callback entry point. */
+    /*
+     * camera_module_t::get_camera_info callback entry point.
+     */
     static int get_camera_info(int camera_id, struct camera_info *info);
 
-    /* camera_module_t::set_callbacks callback entry point. */
+    /*
+     * camera_module_t::set_callbacks callback entry point.
+     */
     static int set_callbacks(const camera_module_callbacks_t *callbacks);
 
-    /* camera_module_t::get_vendor_tag_ops callback entry point */
-    static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
+    /*
+     * camera_module_t::get_vendor_tag_ops callback entry point.
+     */
+    static void get_vendor_tag_ops(vendor_tag_ops_t *ops);
 
-    /* camera_module_t::open_legacy callback entry point */
-    static int open_legacy(const struct hw_module_t* module, const char* id,
-            uint32_t halVersion, struct hw_device_t** device);
+    /*
+     * camera_module_t::open_legacy callback entry point.
+     */
+    static int open_legacy(const struct hw_module_t *module, const char *id,
+            uint32_t halVersion, struct hw_device_t **device);
 
 private:
-    /* hw_module_methods_t::open callback entry point. */
-    static int device_open(const hw_module_t* module,
-                           const char* name,
-                           hw_device_t** device);
+    /*
+     * hw_module_methods_t::open callback entry point.
+     */
+    static int device_open(const hw_module_t *module, const char *name,
+            hw_device_t **device);
 
+public:
     /****************************************************************************
      * Public API.
      ***************************************************************************/
 
-public:
-
-    /* Gets fake camera orientation. */
+    /*
+     * Gets fake camera orientation.
+     */
     int getFakeCameraOrientation() {
-        /* TODO: Have a boot property that controls that. */
+        // TODO: Have a boot property that controls that.
         return 0;
     }
 
-    /* Gets qemu camera orientation. */
+    /*
+     * Gets qemu camera orientation.
+     */
     int getQemuCameraOrientation() {
-        /* TODO: Have a boot property that controls that. */
+        // TODO: Have a boot property that controls that.
         return 0;
     }
 
-    /* Gets number of emulated cameras.
+    /*
+     * Gets number of emulated cameras.
      */
     int getEmulatedCameraNum() const {
         return mEmulatedCameraNum;
     }
 
-    /* Checks whether or not the constructor has succeeded.
+    /*
+     * Checks whether or not the constructor has succeeded.
      */
     bool isConstructedOK() const {
         return mConstructedOK;
@@ -144,67 +177,105 @@
 
     void onStatusChanged(int cameraId, int newStatus);
 
+private:
     /****************************************************************************
      * Private API
      ***************************************************************************/
 
-private:
-    /* Populates emulated cameras array with cameras that are available via
-     * 'camera' service in the emulator. For each such camera and instance of
-     * the EmulatedCameraQemud will be created and added to the mEmulatedCameras
-     * array.
-     */
-    void createQemuCameras();
+    // For carrying QEMU camera information between methods.
+    struct QemuCameraInfo {
+        char *name;
+        char *frameDims;
+        char *dir;
+    };
 
-    /* Waits till qemu-props has done setup, timeout after 500ms */
+    /*
+     * Args:
+     *     token: token whose value is being searched for.
+     *     s: string containing one or more tokens in the format
+     *        "token_name=token_value".
+     *     value: Output parameter for the value of the token.
+     *
+     * Returns:
+     *     true if the token was successfully parsed.
+     */
+    bool getTokenValue(const char *token, const std::string &s, char **value);
+
+    /*
+     * Args:
+     *     qemuCameras: Output parameter for the list of detected camera
+     *                  strings. Each camera is represented by a string of three
+     *                  attributes "name=... framedims=... dir=...", not
+     *                  necessarily in that order.
+     */
+    void findQemuCameras(std::vector<QemuCameraInfo> *qemuCameras);
+
+    /*
+     * Populates emulated cameras array with cameras that are available via
+     * 'camera' service in the emulator. For each such camera, one of the
+     * EmulatedQemuCamera* classes will be created and added to
+     * mEmulatedCameras (based on the HAL version specified in system
+     * properties).
+     */
+    void createQemuCameras(const std::vector<QemuCameraInfo> &qemuCameras);
+
+    /*
+     * Creates a fake camera and adds it to mEmulatedCameras. If backCamera is
+     * true, it will be created as if it were a camera on the back of the phone.
+     * Otherwise, it will be front-facing.
+     */
+    void createFakeCamera(bool backCamera);
+
+    /*
+     * Waits till qemu-props has done setup, timeout after 500ms.
+     */
     void waitForQemuSfFakeCameraPropertyAvailable();
 
-    /* Checks if fake camera emulation is on for the camera facing back. */
-    bool isBackFakeCameraEmulationOn();
+    /*
+     * Checks if fake camera emulation is on for the camera facing back.
+     */
+    bool isFakeCameraEmulationOn(bool backCamera);
 
-    /* Gets camera device version number to use for back camera emulation */
-    int getBackCameraHalVersion();
+    /*
+     * Gets camera device version number to use for back camera emulation.
+     */
+    int getCameraHalVersion(bool backCamera);
 
-    /* Checks if fake camera emulation is on for the camera facing front. */
-    bool isFrontFakeCameraEmulationOn();
 
-    /* Gets camera device version number to use for front camera emulation */
-    int getFrontCameraHalVersion();
-
+private:
     /****************************************************************************
      * Data members.
      ***************************************************************************/
 
-private:
-    /* Connection to the camera service in the emulator. */
-    FactoryQemuClient   mQemuClient;
+    // Connection to the camera service in the emulator.
+    FactoryQemuClient mQemuClient;
 
-    /* Array of cameras available for the emulation. */
-    EmulatedBaseCamera**    mEmulatedCameras;
+    // Array of cameras available for the emulation.
+    EmulatedBaseCamera **mEmulatedCameras;
 
-    /* Number of emulated cameras (including the fake ones). */
-    int                 mEmulatedCameraNum;
+    // Number of emulated cameras (including the fake ones).
+    int mEmulatedCameraNum;
 
-    /* Number of emulated fake cameras. */
-    int                 mFakeCameraNum;
+    // Number of emulated fake cameras.
+    int mFakeCameraNum;
 
-    /* Flags whether or not constructor has succeeded. */
-    bool                mConstructedOK;
+    // Flags whether or not constructor has succeeded.
+    bool mConstructedOK;
 
-    /* Camera callbacks (for status changing) */
-    const camera_module_callbacks_t* mCallbacks;
+    // Camera callbacks (for status changing).
+    const camera_module_callbacks_t *mCallbacks;
 
-    /* Hotplug thread (to call onStatusChanged) */
+    // Hotplug thread (to call onStatusChanged).
     sp<EmulatedCameraHotplugThread> mHotplugThread;
 
 public:
-    /* Contains device open entry point, as required by HAL API. */
-    static struct hw_module_methods_t   mCameraModuleMethods;
+    // Contains device open entry point, as required by HAL API.
+    static struct hw_module_methods_t mCameraModuleMethods;
 };
 
-}; /* namespace android */
+}; // end of namespace android
 
-/* References the global EmulatedCameraFactory instance. */
-extern android::EmulatedCameraFactory   gEmulatedCameraFactory;
+// References the global EmulatedCameraFactory instance.
+extern android::EmulatedCameraFactory gEmulatedCameraFactory;
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H */
+#endif  // HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
diff --git a/camera/EmulatedQemuCamera3.cpp b/camera/EmulatedQemuCamera3.cpp
new file mode 100644
index 0000000..1c8e41f
--- /dev/null
+++ b/camera/EmulatedQemuCamera3.cpp
@@ -0,0 +1,1832 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedQemuCamera3 that encapsulates
+ * functionality of an advanced fake camera.
+ */
+
+// Uncomment LOG_NDEBUG to enable verbose logging, and uncomment both LOG_NDEBUG
+// *and* LOG_NNDEBUG to enable very verbose logging.
+
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#define LOG_TAG "EmulatedCamera_QemuCamera3"
+
+#if defined(LOG_NNDEBUG) && LOG_NNDEBUG == 0
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include "EmulatedCameraFactory.h"
+#include "GrallocModule.h"
+#include "EmulatedQemuCamera3.h"
+
+#include <cmath>
+#include <cutils/properties.h>
+#include <inttypes.h>
+#include <sstream>
+#include <ui/Fence.h>
+#include <utils/Log.h>
+#include <vector>
+
+namespace android {
+
+/*
+ * Constants for Camera Capabilities
+ */
+
+// Time-unit constants, expressed in nanoseconds (the HAL's native time base).
+const int64_t USEC = 1000LL;
+const int64_t MSEC = USEC * 1000LL;
+const int64_t SEC = MSEC * 1000LL;
+
+// Pixel formats accepted when validating stream configurations in
+// configureStreams().
+const int32_t EmulatedQemuCamera3::kAvailableFormats[] = {
+    HAL_PIXEL_FORMAT_BLOB,
+    HAL_PIXEL_FORMAT_RGBA_8888,
+    HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+    // These are handled by YCbCr_420_888
+    //        HAL_PIXEL_FORMAT_YV12,
+    //        HAL_PIXEL_FORMAT_YCrCb_420_SP,
+    HAL_PIXEL_FORMAT_YCbCr_420_888
+};
+
+/*****************************************************************************
+ * Constructor/Destructor
+ ****************************************************************************/
+
+/*
+ * Constructs an EmulatedQemuCamera3 bound to the given camera ID and HAL
+ * module. All cached default request templates start out null; they are
+ * built lazily by constructDefaultRequestSettings().
+ */
+EmulatedQemuCamera3::EmulatedQemuCamera3(int cameraId, struct hw_module_t* module) :
+        EmulatedCamera3(cameraId, module) {
+    ALOGI("Constructing emulated qemu camera 3: ID %d", mCameraID);
+
+    // No request templates have been constructed yet.
+    size_t i = 0;
+    while (i < CAMERA3_TEMPLATE_COUNT) {
+        mDefaultTemplates[i++] = nullptr;
+    }
+}
+
+/*
+ * Frees each request template lazily cached by
+ * constructDefaultRequestSettings(), then releases the device-name string
+ * whose ownership was taken in Initialize().
+ */
+EmulatedQemuCamera3::~EmulatedQemuCamera3() {
+    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; ++i) {
+        if (mDefaultTemplates[i] != nullptr) {
+            free_camera_metadata(mDefaultTemplates[i]);
+        }
+    }
+    // NOTE(review): assumes mDeviceName is null or was allocated with new[]
+    // (see Initialize()); confirm the member is initialized to nullptr in the
+    // header so this is safe when Initialize() was never called.
+    delete[] mDeviceName;
+}
+
+/*****************************************************************************
+ * Public Methods
+ ****************************************************************************/
+
+/*
+ * Camera Device Lifecycle Methods
+ */
+
+/*
+ * Parses the comma-separated "WxH" resolution list reported by the emulator
+ * (e.g. "640x480,1280x720") into mResolutions. The sensor size
+ * (mSensorWidth/mSensorHeight) is taken to be the listed resolution with the
+ * largest area, and any entry with a dimension exceeding that size is
+ * dropped, since the framework would reject streams larger than the sensor.
+ */
+void EmulatedQemuCamera3::parseResolutions(const char *frameDims) {
+    const size_t kMaxFrameDimsLength = 512;
+    size_t frameDimsLength = strnlen(frameDims, kMaxFrameDimsLength);
+    if (frameDimsLength == kMaxFrameDimsLength) {
+        // frameDimsLength is a size_t, so use %zu (the old %d was a format
+        // specifier mismatch).
+        ALOGE("%s: Frame dimensions string was too long (>= %zu)",
+                __FUNCTION__, frameDimsLength);
+        return;
+    } else if (frameDimsLength == 0) {
+        ALOGE("%s: Frame dimensions string was NULL or zero-length",
+                __FUNCTION__);
+        return;
+    }
+    std::stringstream ss(frameDims);
+    std::string input;
+    while (std::getline(ss, input, ',')) {
+        int width = 0;
+        int height = 0;
+        char none = 0;
+        /*
+         * Expect only two results because that means there was nothing after
+         * the height, we don't want any trailing characters. Otherwise, we just
+         * ignore this entry.
+         */
+        if (sscanf(input.c_str(), "%dx%d%c", &width, &height, &none) == 2) {
+            mResolutions.push_back(std::pair<int32_t,int32_t>(width, height));
+            // A successful parse is informational, not an error; log at
+            // verbose level instead of ALOGE.
+            ALOGV("%s: %dx%d", __FUNCTION__, width, height);
+        }
+    }
+
+    /*
+     * We assume the sensor size of the webcam is the resolution with the
+     * largest area. Any resolution with a dimension that exceeds the sensor
+     * size will be rejected, so Camera API calls will start failing. To work
+     * around this, we remove any resolutions with at least one dimension
+     * exceeding that of the max area resolution.
+     */
+
+    // Find the resolution with the maximum area and use that as the sensor
+    // size.
+    int maxArea = 0;
+    for (const auto &res : mResolutions) {
+        int area = res.first * res.second;
+        if (area > maxArea) {
+            maxArea = area;
+            mSensorWidth = res.first;
+            mSensorHeight = res.second;
+        }
+    }
+
+    // Remove any resolution with a dimension exceeding the sensor size.
+    for (auto res = mResolutions.begin(); res != mResolutions.end(); ) {
+        if (res->first > mSensorWidth || res->second > mSensorHeight) {
+            // Width and/or height larger than sensor. Remove it.
+            res = mResolutions.erase(res);
+        } else {
+            ++res;
+        }
+    }
+
+    if (mResolutions.empty()) {
+        ALOGE("%s: Qemu camera has no valid resolutions", __FUNCTION__);
+    }
+}
+
+/*
+ * One-time initialization using the attributes reported by the emulator's
+ * 'camera' service for this device.
+ *
+ * Ownership: this method takes ownership of all three heap strings.
+ * deviceName is retained in mDeviceName (released in the destructor);
+ * frameDims and facingDir are consumed and freed before returning.
+ * NOTE(review): the delete[] calls assume the caller allocated these with
+ * new[] — confirm against the factory's QEMU-camera creation path.
+ *
+ * Args:
+ *     deviceName: Connection string identifying the QEMU camera device.
+ *     frameDims: Comma-separated "WxH" resolutions supported by the device.
+ *     facingDir: "back" for a rear-facing camera; any other value is treated
+ *                as front-facing.
+ *
+ * Returns:
+ *     OK on success; INVALID_OPERATION if already initialized; otherwise the
+ *     error from capability or static-info construction.
+ */
+status_t EmulatedQemuCamera3::Initialize(const char *deviceName,
+                                         const char *frameDims,
+                                         const char *facingDir) {
+    if (mStatus != STATUS_ERROR) {
+        ALOGE("%s: Already initialized!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    /*
+     * Save parameters for later.
+     */
+    mDeviceName = deviceName;
+    parseResolutions(frameDims);
+    if (strcmp("back", facingDir) == 0) {
+        mFacingBack = true;
+    } else {
+        mFacingBack = false;
+    }
+    // We no longer need these two strings.
+    delete[] frameDims;
+    delete[] facingDir;
+
+    status_t res = getCameraCapabilities();
+    if (res != OK) {
+        ALOGE("%s: Unable to get camera capabilities: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    res = constructStaticInfo();
+    if (res != OK) {
+        ALOGE("%s: Unable to allocate static info: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return EmulatedCamera3::Initialize();
+}
+
+/*
+ * Connects to the camera device: starts the QemuSensor, then spins up the
+ * readout thread and JPEG compressor before delegating to the base class.
+ *
+ * Returns:
+ *     OK on success; INVALID_OPERATION if the device is not closed;
+ *     otherwise the error from sensor or readout-thread startup.
+ */
+status_t EmulatedQemuCamera3::connectCamera(hw_device_t** device) {
+    Mutex::Autolock l(mLock);
+    status_t res;
+
+    if (mStatus != STATUS_CLOSED) {
+        ALOGE("%s: Can't connect in state %d", __FUNCTION__, mStatus);
+        return INVALID_OPERATION;
+    }
+
+    /*
+     * Initialize sensor.
+     */
+    mSensor = new QemuSensor(mDeviceName, mSensorWidth, mSensorHeight);
+    mSensor->setQemuSensorListener(this);
+    res = mSensor->startUp();
+    if (res != NO_ERROR) {
+        return res;
+    }
+
+    mReadoutThread = new ReadoutThread(this);
+    mJpegCompressor = new JpegCompressor();
+
+    res = mReadoutThread->run("EmuCam3::readoutThread");
+    if (res != NO_ERROR) {
+        ALOGE("%s: Unable to start readout thread: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        // Don't leave the sensor running if nothing will service its frames;
+        // the original silently leaked the started sensor here.
+        mSensor->shutDown();
+        mSensor.clear();
+        return res;
+    }
+
+    return EmulatedCamera3::connectCamera(device);
+}
+
+/*
+ * Shuts the camera down: stops the sensor, tears down the readout thread,
+ * and frees per-stream private data.
+ *
+ * Locking: mLock is intentionally released before join() — NOTE(review):
+ * presumably because the exiting readout thread may itself acquire mLock;
+ * confirm against ReadoutThread's loop before changing this ordering.
+ */
+status_t EmulatedQemuCamera3::closeCamera() {
+    status_t res;
+    {
+        Mutex::Autolock l(mLock);
+        // Nothing to do if we're already closed.
+        if (mStatus == STATUS_CLOSED) return OK;
+
+        res = mSensor->shutDown();
+        if (res != NO_ERROR) {
+            ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+            return res;
+        }
+        mSensor.clear();
+
+        // Ask the readout thread to exit; the join happens below, outside
+        // the lock.
+        mReadoutThread->requestExit();
+    }
+
+    mReadoutThread->join();
+
+    {
+        Mutex::Autolock l(mLock);
+        // Clear out private stream information.
+        for (StreamIterator s = mStreams.begin(); s != mStreams.end(); s++) {
+            PrivateStreamInfo *privStream =
+                    static_cast<PrivateStreamInfo*>((*s)->priv);
+            delete privStream;
+            (*s)->priv = nullptr;
+        }
+        mStreams.clear();
+        mReadoutThread.clear();
+    }
+
+    return EmulatedCamera3::closeCamera();
+}
+
+/*
+ * Fills in the HAL camera_info for this device: facing direction (determined
+ * in Initialize()) and sensor orientation, then delegates to the base class.
+ */
+status_t EmulatedQemuCamera3::getCameraInfo(struct camera_info *info) {
+    info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+    // This is a QEMU (webcam-backed) camera, so use the factory's qemu
+    // orientation accessor rather than the fake-camera one. Both currently
+    // return 0, so behavior is unchanged; this fixes the semantic mismatch.
+    info->orientation = gEmulatedCameraFactory.getQemuCameraOrientation();
+    return EmulatedCamera3::getCameraInfo(info);
+}
+
+/*
+ * Camera3 Interface Methods
+ */
+
+/*
+ * Validates the stream configuration requested by the framework and updates
+ * the device's stream list to match it.
+ *
+ * Steps:
+ *   1. Sanity-check the list, then each stream (at most one input stream;
+ *      format must be in kAvailableFormats).
+ *   2. Mark all currently-tracked streams dead, then walk the new list:
+ *      attach PrivateStreamInfo to brand-new streams and revive known ones.
+ *   3. Set usage flags and max_buffers on every stream in the new list.
+ *   4. Reap streams that did not appear in the new list.
+ *
+ * Returns:
+ *     OK on success; NO_INIT if the device is in a bad state; BAD_VALUE for
+ *     an invalid configuration.
+ */
+status_t EmulatedQemuCamera3::configureStreams(
+        camera3_stream_configuration *streamList) {
+    Mutex::Autolock l(mLock);
+
+    if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) {
+        ALOGE("%s: Cannot configure streams in state %d",
+                __FUNCTION__, mStatus);
+        return NO_INIT;
+    }
+
+    /*
+     * Sanity-check input list. These checks must run before anything reads
+     * through streamList; the original logged streamList->num_streams before
+     * the null checks, dereferencing a potentially null pointer.
+     */
+    if (streamList == nullptr) {
+        ALOGE("%s: NULL stream configuration", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    if (streamList->streams == nullptr) {
+        ALOGE("%s: NULL stream list", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    if (streamList->num_streams < 1) {
+        ALOGE("%s: Bad number of streams requested: %d", __FUNCTION__,
+                streamList->num_streams);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %d streams", __FUNCTION__, streamList->num_streams);
+
+    // Locate the (single) input stream and validate every stream's format.
+    camera3_stream_t *inputStream = nullptr;
+    for (size_t i = 0; i < streamList->num_streams; ++i) {
+        camera3_stream_t *newStream = streamList->streams[i];
+
+        if (newStream == nullptr) {
+            ALOGE("%s: Stream index %zu was NULL", __FUNCTION__, i);
+            return BAD_VALUE;
+        }
+
+        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
+                __FUNCTION__, newStream, i, newStream->stream_type,
+                newStream->usage, newStream->format);
+
+        if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
+            newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
+            if (inputStream != nullptr) {
+                ALOGE("%s: Multiple input streams requested!", __FUNCTION__);
+                return BAD_VALUE;
+            }
+            inputStream = newStream;
+        }
+
+        bool validFormat = false;
+        size_t numFormats = sizeof(kAvailableFormats) /
+                sizeof(kAvailableFormats[0]);
+        for (size_t f = 0; f < numFormats; ++f) {
+            if (newStream->format == kAvailableFormats[f]) {
+                validFormat = true;
+                break;
+            }
+        }
+        if (!validFormat) {
+            ALOGE("%s: Unsupported stream format 0x%x requested",
+                    __FUNCTION__, newStream->format);
+            return BAD_VALUE;
+        }
+    }
+    mInputStream = inputStream;
+
+    /*
+     * Initially mark all existing streams as not alive.
+     */
+    for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) {
+        PrivateStreamInfo *privStream =
+                static_cast<PrivateStreamInfo*>((*s)->priv);
+        privStream->alive = false;
+    }
+
+    /*
+     * Find new streams and mark still-alive ones.
+     */
+    for (size_t i = 0; i < streamList->num_streams; ++i) {
+        camera3_stream_t *newStream = streamList->streams[i];
+        if (newStream->priv == nullptr) {
+            // New stream. Construct info. (max_buffers is assigned once,
+            // unconditionally, below; the original set it here too.)
+            PrivateStreamInfo *privStream = new PrivateStreamInfo();
+            privStream->alive = true;
+
+            newStream->priv = privStream;
+            mStreams.push_back(newStream);
+        } else {
+            // Existing stream, mark as still alive.
+            PrivateStreamInfo *privStream =
+                    static_cast<PrivateStreamInfo*>(newStream->priv);
+            privStream->alive = true;
+        }
+        // Always update usage and max buffers.
+        newStream->max_buffers = kMaxBufferCount;
+        switch (newStream->stream_type) {
+            case CAMERA3_STREAM_OUTPUT:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+            case CAMERA3_STREAM_INPUT:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
+                break;
+            case CAMERA3_STREAM_BIDIRECTIONAL:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ |
+                        GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+        }
+    }
+
+    /*
+     * Reap the dead streams.
+     */
+    for (StreamIterator s = mStreams.begin(); s != mStreams.end();) {
+        PrivateStreamInfo *privStream =
+                static_cast<PrivateStreamInfo*>((*s)->priv);
+        if (!privStream->alive) {
+            (*s)->priv = nullptr;
+            delete privStream;
+            s = mStreams.erase(s);
+        } else {
+            ++s;
+        }
+    }
+
+    /*
+     * Can't reuse settings across configure call.
+     */
+    mPrevSettings.clear();
+
+    return OK;
+}
+
+/*
+ * Legacy buffer-registration entry point from HAL versions before 3.2. The
+ * framework must not call this on newer devices, so always report failure.
+ */
+status_t EmulatedQemuCamera3::registerStreamBuffers(
+        const camera3_stream_buffer_set *bufferSet) {
+    Mutex::Autolock l(mLock);
+    ALOGE("%s: Should not be invoked on HAL versions >= 3.2!", __FUNCTION__);
+    return NO_INIT;
+}
+
+const camera_metadata_t* EmulatedQemuCamera3::constructDefaultRequestSettings(
+        int type) {
+    Mutex::Autolock l(mLock);
+
+    if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) {
+        ALOGE("%s: Unknown request settings template: %d",
+                __FUNCTION__, type);
+        return nullptr;
+    }
+
+    if (!hasCapability(BACKWARD_COMPATIBLE) && type != CAMERA3_TEMPLATE_PREVIEW) {
+        ALOGE("%s: Template %d not supported w/o BACKWARD_COMPATIBLE capability",
+                __FUNCTION__, type);
+        return nullptr;
+    }
+
+    /*
+     * Cache is not just an optimization - pointer returned has to live at least
+     * as long as the camera device instance does.
+     */
+    if (mDefaultTemplates[type] != nullptr) {
+        return mDefaultTemplates[type];
+    }
+
+    CameraMetadata settings;
+
+    /* android.request */
+
+    static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+    settings.update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+    static const int32_t id = 0;
+    settings.update(ANDROID_REQUEST_ID, &id, 1);
+
+    static const int32_t frameCount = 0;
+    settings.update(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+    /* android.lens */
+
+    static const float focalLength = 5.0f;
+    settings.update(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const float focusDistance = 0;
+        settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+        static const float aperture = 2.8f;
+        settings.update(ANDROID_LENS_APERTURE, &aperture, 1);
+
+        static const float filterDensity = 0;
+        settings.update(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+        static const uint8_t opticalStabilizationMode =
+                ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+                &opticalStabilizationMode, 1);
+
+        // FOCUS_RANGE set only in frame
+    }
+
+    /* android.flash */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+        settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
+
+        static const uint8_t flashPower = 10;
+        settings.update(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+        static const int64_t firingTime = 0;
+        settings.update(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+    }
+
+    /* android.scaler */
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const int32_t cropRegion[4] = {
+            0, 0, mSensorWidth, mSensorHeight
+        };
+        settings.update(ANDROID_SCALER_CROP_REGION, cropRegion, 4);
+    }
+
+    /* android.jpeg */
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t jpegQuality = 80;
+        settings.update(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+        static const int32_t thumbnailSize[2] = {
+            640, 480
+        };
+        settings.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+        static const uint8_t thumbnailQuality = 80;
+        settings.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+        static const double gpsCoordinates[2] = {
+            0, 0
+        };
+        settings.update(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+        static const uint8_t gpsProcessingMethod[32] = "None";
+        settings.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+        static const int64_t gpsTimestamp = 0;
+        settings.update(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+        static const int32_t jpegOrientation = 0;
+        settings.update(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+    }
+
+    /* android.stats */
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t faceDetectMode =
+                ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+        settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+        static const uint8_t hotPixelMapMode =
+                ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+        settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+    }
+
+    /* android.control */
+
+    uint8_t controlIntent = 0;
+    switch (type) {
+      case CAMERA3_TEMPLATE_PREVIEW:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+        break;
+      case CAMERA3_TEMPLATE_STILL_CAPTURE:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+        break;
+      case CAMERA3_TEMPLATE_VIDEO_RECORD:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+        break;
+      case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+        break;
+      case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+        break;
+      case CAMERA3_TEMPLATE_MANUAL:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+        break;
+      default:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+        break;
+    }
+    settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+    const uint8_t controlMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+            ANDROID_CONTROL_MODE_OFF :
+            ANDROID_CONTROL_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+    int32_t aeTargetFpsRange[2] = {
+        5, 30
+    };
+    if (type == CAMERA3_TEMPLATE_VIDEO_RECORD ||
+            type == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT) {
+        aeTargetFpsRange[0] = 30;
+    }
+    settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+        settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+        static const uint8_t sceneMode =
+                ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+        settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+        const uint8_t aeMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+                ANDROID_CONTROL_AE_MODE_OFF : ANDROID_CONTROL_AE_MODE_ON;
+        settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+        static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+        settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+        static const int32_t controlRegions[5] = {
+            0, 0, 0, 0, 0
+        };
+        settings.update(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+        static const int32_t aeExpCompensation = 0;
+        settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+
+        static const uint8_t aeAntibandingMode =
+                ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+        settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+        static const uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+        settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger, 1);
+
+        const uint8_t awbMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+                ANDROID_CONTROL_AWB_MODE_OFF :
+                ANDROID_CONTROL_AWB_MODE_AUTO;
+        settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+        static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+        settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+        uint8_t afMode = 0;
+
+        if (mFacingBack) {
+            switch (type) {
+                case CAMERA3_TEMPLATE_PREVIEW:
+                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+                    break;
+                case CAMERA3_TEMPLATE_STILL_CAPTURE:
+                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+                    break;
+                case CAMERA3_TEMPLATE_VIDEO_RECORD:
+                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+                    break;
+                case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+                    break;
+                case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+                    break;
+                case CAMERA3_TEMPLATE_MANUAL:
+                    afMode = ANDROID_CONTROL_AF_MODE_OFF;
+                    break;
+                default:
+                    afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+                    break;
+            }
+        } else {
+            afMode = ANDROID_CONTROL_AF_MODE_OFF;
+        }
+        settings.update(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+        settings.update(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+        static const uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+        settings.update(ANDROID_CONTROL_AF_TRIGGER, &afTrigger, 1);
+
+        static const uint8_t vstabMode =
+                ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+        settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+                        &vstabMode, 1);
+
+        static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
+        settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
+
+        static const uint8_t lensShadingMapMode =
+                ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+        settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+                        &lensShadingMapMode, 1);
+
+        static const uint8_t aberrationMode =
+                ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+                        &aberrationMode, 1);
+
+        static const int32_t testPatternMode =
+                ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+        settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testPatternMode, 1);
+    }
+
+    mDefaultTemplates[type] = settings.release();
+
+    return mDefaultTemplates[type];
+}
+
+/*
+ * Validates an incoming capture request, locks every output buffer for
+ * writing, then configures QemuSensor and queues the request on the readout
+ * thread for asynchronous completion.
+ *
+ * Returns OK on success, BAD_VALUE for malformed requests,
+ * INVALID_OPERATION when called in the wrong device state, and NO_INIT for
+ * runtime failures (fence/lock/JPEG/sync timeouts).
+ */
+status_t EmulatedQemuCamera3::processCaptureRequest(
+        camera3_capture_request *request) {
+    Mutex::Autolock l(mLock);
+    status_t res;
+
+    /* Validation */
+
+    if (mStatus < STATUS_READY) {
+        ALOGE("%s: Can't submit capture requests in state %d", __FUNCTION__,
+                mStatus);
+        return INVALID_OPERATION;
+    }
+
+    if (request == nullptr) {
+        ALOGE("%s: NULL request!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    uint32_t frameNumber = request->frame_number;
+
+    if (request->settings == nullptr && mPrevSettings.isEmpty()) {
+        ALOGE("%s: Request %d: NULL settings for first request after "
+                "configureStreams()", __FUNCTION__, frameNumber);
+        return BAD_VALUE;
+    }
+
+    if (request->input_buffer != nullptr &&
+            request->input_buffer->stream != mInputStream) {
+        ALOGE("%s: Request %d: Input buffer not from input stream!",
+                __FUNCTION__, frameNumber);
+        ALOGV("%s: Bad stream %p, expected: %p", __FUNCTION__,
+                request->input_buffer->stream, mInputStream);
+        ALOGV("%s: Bad stream type %d, expected stream type %d", __FUNCTION__,
+                request->input_buffer->stream->stream_type,
+                mInputStream ? mInputStream->stream_type : -1);
+
+        return BAD_VALUE;
+    }
+
+    if (request->num_output_buffers < 1 || request->output_buffers == nullptr) {
+        ALOGE("%s: Request %d: No output buffers provided!",
+                __FUNCTION__, frameNumber);
+        return BAD_VALUE;
+    }
+
+    /*
+     * Validate all buffers, starting with input buffer if it's given.
+     */
+
+    ssize_t idx;
+    const camera3_stream_buffer_t *b;
+    if (request->input_buffer != nullptr) {
+        idx = -1;  // The input buffer is logged as index -1.
+        b = request->input_buffer;
+    } else {
+        idx = 0;
+        b = request->output_buffers;
+    }
+    do {
+        PrivateStreamInfo *priv =
+                static_cast<PrivateStreamInfo*>(b->stream->priv);
+        if (priv == nullptr) {
+            ALOGE("%s: Request %d: Buffer %zu: Unconfigured stream!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (!priv->alive) {
+            ALOGE("%s: Request %d: Buffer %zu: Dead stream!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+            ALOGE("%s: Request %d: Buffer %zu: Status not OK!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->release_fence != -1) {
+            ALOGE("%s: Request %d: Buffer %zu: Has a release fence!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->buffer == nullptr) {
+            ALOGE("%s: Request %d: Buffer %zu: NULL buffer handle!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        idx++;
+        b = &(request->output_buffers[idx]);
+    } while (idx < (ssize_t)request->num_output_buffers);
+
+    // TODO: Validate settings parameters.
+
+    /*
+     * Start processing this request.
+     */
+
+    mStatus = STATUS_ACTIVE;
+
+    CameraMetadata settings;
+
+    if (request->settings == nullptr) {
+        // Repeating request: reuse the settings from the previous capture.
+        settings.acquire(mPrevSettings);
+    } else {
+        settings = request->settings;
+    }
+
+    /*
+     * Get ready for sensor config.
+     */
+
+    // TODO: We shouldn't need exposureTime or frameDuration for webcams.
+    nsecs_t exposureTime;
+    nsecs_t frameDuration;
+    bool needJpeg = false;
+    camera_metadata_entry_t entry;
+
+    entry = settings.find(ANDROID_SENSOR_EXPOSURE_TIME);
+    exposureTime = (entry.count > 0) ?
+            entry.data.i64[0] :
+            QemuSensor::kExposureTimeRange[0];
+    entry = settings.find(ANDROID_SENSOR_FRAME_DURATION);
+    frameDuration = (entry.count > 0) ?
+            entry.data.i64[0] :
+            QemuSensor::kFrameDurationRange[0];
+
+    // Stretch the frame so it always contains the exposure plus the minimum
+    // vertical blanking interval.
+    if (exposureTime > frameDuration) {
+        frameDuration = exposureTime + QemuSensor::kMinVerticalBlank;
+        settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+    }
+
+    Buffers *sensorBuffers = new Buffers();
+    HalBufferVector *buffers = new HalBufferVector();
+
+    sensorBuffers->setCapacity(request->num_output_buffers);
+    buffers->setCapacity(request->num_output_buffers);
+
+    /*
+     * Unlocks the first |lockedCount| output buffers and frees the temporary
+     * buffer vectors. Used on every failure path once locking has begun, so
+     * gralloc locks and heap allocations are not leaked.
+     */
+    auto cleanupLocked = [&](size_t lockedCount) {
+        for (size_t j = 0; j < lockedCount; ++j) {
+            GrallocModule::getInstance().unlock(
+                    *(request->output_buffers[j].buffer));
+        }
+        delete sensorBuffers;
+        delete buffers;
+    };
+
+    /*
+     * Process all the buffers we got for output, constructing internal buffer
+     * structures for them, and lock them for writing.
+     */
+    for (size_t i = 0; i < request->num_output_buffers; ++i) {
+        const camera3_stream_buffer &srcBuf = request->output_buffers[i];
+        StreamBuffer destBuf;
+        destBuf.streamId = kGenericStreamId;
+        destBuf.width = srcBuf.stream->width;
+        destBuf.height = srcBuf.stream->height;
+        // For goldfish, IMPLEMENTATION_DEFINED is always RGBx_8888.
+        destBuf.format = (srcBuf.stream->format ==
+                          HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) ?
+                HAL_PIXEL_FORMAT_RGBA_8888 :
+                srcBuf.stream->format;
+        destBuf.stride = srcBuf.stream->width;
+        destBuf.dataSpace = srcBuf.stream->data_space;
+        destBuf.buffer = srcBuf.buffer;
+
+        if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) {
+            needJpeg = true;
+        }
+
+        // Wait on fence.
+        sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
+        res = bufferAcquireFence->wait(kFenceTimeoutMs);
+        if (res == TIMED_OUT) {
+            ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
+                    __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
+        }
+        if (res == OK) {
+            // Lock buffer for writing.
+            if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+                if (destBuf.format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+                    android_ycbcr ycbcr = android_ycbcr();
+                    res = GrallocModule::getInstance().lock_ycbcr(
+                            *(destBuf.buffer),
+                            GRALLOC_USAGE_HW_CAMERA_WRITE,
+                            0, 0, destBuf.width, destBuf.height,
+                            &ycbcr);
+                    /*
+                     * This is only valid because we know that emulator's
+                     * YCbCr_420_888 is really contiguous NV21 under the hood.
+                     */
+                    destBuf.img = static_cast<uint8_t*>(ycbcr.y);
+                } else {
+                    ALOGE("Unexpected private format for flexible YUV: 0x%x",
+                            destBuf.format);
+                    res = INVALID_OPERATION;
+                }
+            } else {
+                res = GrallocModule::getInstance().lock(
+                    *(destBuf.buffer),
+                    GRALLOC_USAGE_HW_CAMERA_WRITE,
+                    0, 0, destBuf.width, destBuf.height,
+                    (void**)&(destBuf.img));
+
+            }
+            if (res != OK) {
+                ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
+                        __FUNCTION__, frameNumber, i);
+            }
+        }
+
+        if (res != OK) {
+            /*
+             * Either waiting or locking failed. Unlock locked buffers and bail
+             * out. Only the first |i| buffers were successfully locked; buffer
+             * |i| itself failed, so it must not be unlocked.
+             */
+            cleanupLocked(i);
+            return NO_INIT;
+        }
+
+        sensorBuffers->push_back(destBuf);
+        buffers->push_back(srcBuf);
+    }
+
+    /*
+     * Wait for JPEG compressor to not be busy, if needed.
+     */
+    if (needJpeg) {
+        bool ready = mJpegCompressor->waitForDone(kJpegTimeoutNs);
+        if (!ready) {
+            ALOGE("%s: Timeout waiting for JPEG compression to complete!",
+                    __FUNCTION__);
+            cleanupLocked(request->num_output_buffers);
+            return NO_INIT;
+        }
+        res = mJpegCompressor->reserve();
+        if (res != OK) {
+            ALOGE("%s: Error managing JPEG compressor resources, can't "
+                    "reserve it!", __FUNCTION__);
+            cleanupLocked(request->num_output_buffers);
+            return NO_INIT;
+        }
+    }
+
+    /*
+     * TODO: We shouldn't need to wait for sensor readout with a webcam, because
+     * we might be wasting time.
+     */
+
+    /*
+     * Wait until the in-flight queue has room.
+     */
+    res = mReadoutThread->waitForReadout();
+    if (res != OK) {
+        ALOGE("%s: Timeout waiting for previous requests to complete!",
+                __FUNCTION__);
+        // NOTE(review): if a JPEG reservation was taken above it is not
+        // released here — confirm whether JpegCompressor needs a cancel call.
+        cleanupLocked(request->num_output_buffers);
+        return NO_INIT;
+    }
+
+    /*
+     * Wait until sensor's ready. This waits for lengthy amounts of time with
+     * mLock held, but the interface spec is that no other calls may by done to
+     * the HAL by the framework while process_capture_request is happening.
+     */
+    int syncTimeoutCount = 0;
+    while (!mSensor->waitForVSync(kSyncWaitTimeout)) {
+        if (mStatus == STATUS_ERROR) {
+            cleanupLocked(request->num_output_buffers);
+            return NO_INIT;
+        }
+        if (syncTimeoutCount == kMaxSyncTimeoutCount) {
+            ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
+                    __FUNCTION__, frameNumber,
+                    kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
+            cleanupLocked(request->num_output_buffers);
+            return NO_INIT;
+        }
+        syncTimeoutCount++;
+    }
+
+    /*
+     * Configure sensor and queue up the request to the readout thread. The
+     * readout thread takes ownership of |sensorBuffers| and |buffers| from
+     * here on.
+     */
+    mSensor->setFrameDuration(frameDuration);
+    mSensor->setDestinationBuffers(sensorBuffers);
+    mSensor->setFrameNumber(request->frame_number);
+
+    ReadoutThread::Request r;
+    r.frameNumber = request->frame_number;
+    r.settings = settings;
+    r.sensorBuffers = sensorBuffers;
+    r.buffers = buffers;
+
+    mReadoutThread->queueCaptureRequest(r);
+    ALOGVV("%s: Queued frame %d", __FUNCTION__, request->frame_number);
+
+    // Cache the settings for next time.
+    mPrevSettings.acquire(settings);
+
+    return OK;
+}
+
+/*
+ * camera3 flush() entry point. Draining in-flight requests is not
+ * implemented for the emulated camera; we log a warning and report success
+ * so the framework does not treat the call as a fatal error.
+ */
+status_t EmulatedQemuCamera3::flush() {
+    ALOGW("%s: Not implemented; ignored", __FUNCTION__);
+    return OK;
+}
+
+/*****************************************************************************
+ * Private Methods
+ ****************************************************************************/
+
+/*
+ * Populates mCapabilities from the 'qemu.sf.back_camera_caps' or
+ * 'qemu.sf.front_camera_caps' boot property (chosen by mFacingBack). The
+ * property is a comma/space-separated, case-insensitive list of capability
+ * names; unrecognized entries are ignored. BACKWARD_COMPATIBLE is always
+ * added regardless of the property's contents.
+ */
+status_t EmulatedQemuCamera3::getCameraCapabilities() {
+    const char *key = mFacingBack ? "qemu.sf.back_camera_caps" :
+            "qemu.sf.front_camera_caps";
+
+    /*
+     * Defined by 'qemu.sf.*_camera_caps' boot property: if the property doesn't
+     * exist, it is assumed to list FULL.
+     */
+    char prop[PROPERTY_VALUE_MAX];
+    if (property_get(key, prop, nullptr) > 0) {
+        char *saveptr = nullptr;
+        char *cap = strtok_r(prop, " ,", &saveptr);
+        while (cap != nullptr) {
+            for (int i = 0; i < NUM_CAPABILITIES; ++i) {
+                if (!strcasecmp(cap, sAvailableCapabilitiesStrings[i])) {
+                    mCapabilities.add(static_cast<AvailableCapabilities>(i));
+                    break;
+                }
+            }
+            cap = strtok_r(nullptr, " ,", &saveptr);
+        }
+        if (mCapabilities.size() == 0) {
+            // Report the property that was actually consulted (back or
+            // front) instead of a hard-coded name.
+            ALOGE("%s had no valid capabilities: %s", key, prop);
+        }
+    }
+
+    mCapabilities.add(BACKWARD_COMPATIBLE);
+
+    ALOGI("Camera %d capabilities:", mCameraID);
+    for (size_t i = 0; i < mCapabilities.size(); ++i) {
+        ALOGI("  %s", sAvailableCapabilitiesStrings[mCapabilities[i]]);
+    }
+
+    return OK;
+}
+
+/*
+ * Returns true iff |cap| was previously added to mCapabilities.
+ */
+bool EmulatedQemuCamera3::hasCapability(AvailableCapabilities cap) {
+    return mCapabilities.indexOf(cap) >= 0;
+}
+
+status_t EmulatedQemuCamera3::constructStaticInfo() {
+    CameraMetadata info;
+    Vector<int32_t> availableCharacteristicsKeys;
+    status_t res;
+
+#define ADD_STATIC_ENTRY(name, varptr, count) \
+        availableCharacteristicsKeys.add(name);   \
+        res = info.update(name, varptr, count); \
+        if (res != OK) return res
+
+    static const float sensorPhysicalSize[2] = {3.20f, 2.40f};  // mm
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+            sensorPhysicalSize, 2);
+
+    const int32_t pixelArray[] = {mSensorWidth, mSensorHeight};
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+            pixelArray, 2);
+    const int32_t activeArray[] = {0, 0, mSensorWidth, mSensorHeight};
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+            activeArray, 4);
+
+    static const int32_t orientation = 90;  // Aligned with 'long edge'.
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+
+    static const uint8_t timestampSource =
+            ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, &timestampSource, 1);
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const int32_t availableTestPatternModes[] = {
+            ANDROID_SENSOR_TEST_PATTERN_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+                availableTestPatternModes,
+                sizeof(availableTestPatternModes) / sizeof(int32_t));
+    }
+
+    /* android.lens */
+
+    static const float focalLength = 3.30f; // mm
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+            &focalLength, 1);
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        // 5 cm min focus distance for back camera; infinity (fixed focus) for front
+        const float minFocusDistance = mFacingBack ? 1.0 / 0.05 : 0.0;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+                &minFocusDistance, 1);
+
+        // 5 m hyperfocal distance for back camera; infinity (fixed focus) for front
+        const float hyperFocalDistance = mFacingBack ? 1.0 / 5.0 : 0.0;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+                &minFocusDistance, 1);
+
+        static const float aperture = 2.8f;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+                &aperture, 1);
+        static const float filterDensity = 0;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+                &filterDensity, 1);
+        static const uint8_t availableOpticalStabilization =
+                ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+                &availableOpticalStabilization, 1);
+
+        static const int32_t lensShadingMapSize[] = {1, 1};
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+                sizeof(lensShadingMapSize) / sizeof(int32_t));
+
+        static const uint8_t lensFocusCalibration =
+                ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE;
+        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION,
+                &lensFocusCalibration, 1);
+    }
+
+    static const uint8_t lensFacing = mFacingBack ?
+            ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+    ADD_STATIC_ENTRY(ANDROID_LENS_FACING, &lensFacing, 1);
+
+    /* android.flash */
+
+    static const uint8_t flashAvailable = 0;
+    ADD_STATIC_ENTRY(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+
+    /* android.scaler */
+
+    std::vector<int32_t> availableStreamConfigurations;
+    std::vector<int64_t> availableMinFrameDurations;
+    std::vector<int64_t> availableStallDurations;
+
+    /*
+     * Build stream configurations, min frame durations, and stall durations for
+     * all resolutions reported by camera device.
+     */
+    for (const auto &res : mResolutions) {
+        int32_t width = res.first, height = res.second;
+        std::vector<int32_t> currentResStreamConfigurations = {
+            HAL_PIXEL_FORMAT_BLOB, width, height,
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height,
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+
+            HAL_PIXEL_FORMAT_YCbCr_420_888, width, height,
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+
+            HAL_PIXEL_FORMAT_RGBA_8888, width, height,
+            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
+        };
+        std::vector<int32_t> currentResMinFrameDurations = {
+            HAL_PIXEL_FORMAT_BLOB, width, height,
+            QemuSensor::kFrameDurationRange[0],
+
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height,
+            QemuSensor::kFrameDurationRange[0],
+
+            HAL_PIXEL_FORMAT_YCbCr_420_888, width, height,
+            QemuSensor::kFrameDurationRange[0],
+
+            HAL_PIXEL_FORMAT_RGBA_8888, width, height,
+            QemuSensor::kFrameDurationRange[0]
+        };
+        std::vector<int32_t> currentResStallDurations = {
+            // We should only introduce stall times with JPEG-compressed frames.
+            HAL_PIXEL_FORMAT_BLOB, width, height,
+            QemuSensor::kFrameDurationRange[0],
+
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height, 0,
+
+            HAL_PIXEL_FORMAT_YCbCr_420_888, width, height, 0,
+
+            HAL_PIXEL_FORMAT_RGBA_8888, width, height, 0
+        };
+        availableStreamConfigurations.insert(
+                availableStreamConfigurations.end(),
+                currentResStreamConfigurations.begin(),
+                currentResStreamConfigurations.end());
+        availableMinFrameDurations.insert(
+                availableMinFrameDurations.end(),
+                currentResMinFrameDurations.begin(),
+                currentResMinFrameDurations.end());
+        availableStallDurations.insert(
+                availableStallDurations.end(),
+                currentResStallDurations.begin(),
+                currentResStallDurations.end());
+    }
+
+    /*
+     * Now, if nonempty, add them to the camera's available characteristics.
+     */
+    if (availableStreamConfigurations.size() > 0) {
+        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+                availableStreamConfigurations.data(),
+                availableStreamConfigurations.size());
+    }
+    if (availableMinFrameDurations.size() > 0) {
+        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+                &availableMinFrameDurations[0],
+                availableMinFrameDurations.size());
+    }
+    if (availableStallDurations.size() > 0) {
+        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+                &availableStallDurations[0],
+                availableStallDurations.size());
+    }
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_FREEFORM;
+        ADD_STATIC_ENTRY(ANDROID_SCALER_CROPPING_TYPE,
+                &croppingType, 1);
+
+        static const float maxZoom = 10;
+        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+                &maxZoom, 1);
+    }
+
+    /* android.jpeg */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const int32_t jpegThumbnailSizes[] = {
+            0, 0,
+            160, 120,
+            320, 240
+        };
+        ADD_STATIC_ENTRY(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+                jpegThumbnailSizes,
+                sizeof(jpegThumbnailSizes) / sizeof(int32_t));
+
+        static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+        ADD_STATIC_ENTRY(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+    }
+
+    /* android.stats */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableFaceDetectModes[] = {
+            ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+            ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+            ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
+        };
+        ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+                availableFaceDetectModes,
+                sizeof(availableFaceDetectModes));
+
+        static const int32_t maxFaceCount = 8;
+        ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+                &maxFaceCount, 1);
+
+
+        static const uint8_t availableShadingMapModes[] = {
+            ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(
+                ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+                availableShadingMapModes, sizeof(availableShadingMapModes));
+    }
+
+    /* android.sync */
+
+    static const int32_t maxLatency =
+            hasCapability(FULL_LEVEL) ?
+            ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL : 3;
+    ADD_STATIC_ENTRY(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
+
+    /* android.control */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableControlModes[] = {
+            ANDROID_CONTROL_MODE_OFF,
+            ANDROID_CONTROL_MODE_AUTO,
+            ANDROID_CONTROL_MODE_USE_SCENE_MODE
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES,
+                availableControlModes, sizeof(availableControlModes));
+    } else {
+        static const uint8_t availableControlModes[] = {
+            ANDROID_CONTROL_MODE_AUTO
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES,
+                availableControlModes, sizeof(availableControlModes));
+    }
+
+    static const uint8_t availableSceneModes[] = {
+        hasCapability(BACKWARD_COMPATIBLE) ?
+            ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
+            ANDROID_CONTROL_SCENE_MODE_DISABLED
+    };
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+            availableSceneModes, sizeof(availableSceneModes));
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableEffects[] = {
+            ANDROID_CONTROL_EFFECT_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+                availableEffects, sizeof(availableEffects));
+    }
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const int32_t max3aRegions[] = {
+            /* AE */ 1,
+            /* AWB */ 0,
+            /* AF */ 1
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_MAX_REGIONS,
+                max3aRegions,
+                sizeof(max3aRegions) / sizeof(max3aRegions[0]));
+
+        static const uint8_t availableAeModes[] = {
+            ANDROID_CONTROL_AE_MODE_OFF,
+            ANDROID_CONTROL_AE_MODE_ON
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+                availableAeModes, sizeof(availableAeModes));
+
+        static const camera_metadata_rational exposureCompensationStep = {1, 3};
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+                &exposureCompensationStep, 1);
+
+        int32_t exposureCompensationRange[] = {-9, 9};
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+                exposureCompensationRange,
+                sizeof(exposureCompensationRange) / sizeof(int32_t));
+    }
+
+    static const int32_t availableTargetFpsRanges[] = {
+        5, 30, 15, 30, 15, 15, 30, 30
+    };
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+            availableTargetFpsRanges,
+            sizeof(availableTargetFpsRanges) / sizeof(int32_t));
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableAntibandingModes[] = {
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+                availableAntibandingModes, sizeof(availableAntibandingModes));
+    }
+
+    static const uint8_t aeLockAvailable = hasCapability(BACKWARD_COMPATIBLE) ?
+            ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE :
+            ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+            &aeLockAvailable, 1);
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableAwbModes[] = {
+            ANDROID_CONTROL_AWB_MODE_OFF,
+            ANDROID_CONTROL_AWB_MODE_AUTO,
+            ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+            ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+            ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+            ANDROID_CONTROL_AWB_MODE_SHADE,
+        };
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+                availableAwbModes, sizeof(availableAwbModes));
+    }
+
+    static const uint8_t awbLockAvailable = hasCapability(BACKWARD_COMPATIBLE) ?
+            ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE :
+            ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+            &awbLockAvailable, 1);
+
+    static const uint8_t availableAfModesBack[] = {
+        ANDROID_CONTROL_AF_MODE_OFF,
+        ANDROID_CONTROL_AF_MODE_AUTO,
+        ANDROID_CONTROL_AF_MODE_MACRO,
+        ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+        ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
+    };
+
+    static const uint8_t availableAfModesFront[] = {
+        ANDROID_CONTROL_AF_MODE_OFF
+    };
+
+    if (mFacingBack && hasCapability(BACKWARD_COMPATIBLE)) {
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+                availableAfModesBack, sizeof(availableAfModesBack));
+    } else {
+        ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+                availableAfModesFront, sizeof(availableAfModesFront));
+    }
+
+    static const uint8_t availableVstabModes[] = {
+        ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
+    };
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+            availableVstabModes, sizeof(availableVstabModes));
+
+    /* android.colorCorrection */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableAberrationModes[] = {
+            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
+            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST,
+            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY
+        };
+        ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+                availableAberrationModes, sizeof(availableAberrationModes));
+    } else {
+        static const uint8_t availableAberrationModes[] = {
+            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
+        };
+        ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+                availableAberrationModes, sizeof(availableAberrationModes));
+    }
+
+    /* android.edge */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableEdgeModes[] = {
+            ANDROID_EDGE_MODE_OFF,
+            ANDROID_EDGE_MODE_FAST,
+            ANDROID_EDGE_MODE_HIGH_QUALITY,
+        };
+        ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES,
+                availableEdgeModes, sizeof(availableEdgeModes));
+    } else {
+        static const uint8_t availableEdgeModes[] = {
+            ANDROID_EDGE_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES,
+                availableEdgeModes, sizeof(availableEdgeModes));
+    }
+
+    /* android.info */
+
+    static const uint8_t supportedHardwareLevel =
+            ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+    ADD_STATIC_ENTRY(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+            &supportedHardwareLevel, /* count */ 1);
+
+    /* android.noiseReduction */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableNoiseReductionModes[] = {
+            ANDROID_NOISE_REDUCTION_MODE_OFF,
+            ANDROID_NOISE_REDUCTION_MODE_FAST,
+            ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY
+        };
+        ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+                availableNoiseReductionModes,
+                sizeof(availableNoiseReductionModes));
+    } else {
+        static const uint8_t availableNoiseReductionModes[] = {
+            ANDROID_NOISE_REDUCTION_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+                availableNoiseReductionModes,
+                sizeof(availableNoiseReductionModes));
+    }
+
+    /* android.shading */
+
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        static const uint8_t availableShadingModes[] = {
+            ANDROID_SHADING_MODE_OFF,
+            ANDROID_SHADING_MODE_FAST,
+            ANDROID_SHADING_MODE_HIGH_QUALITY
+        };
+        ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
+                sizeof(availableShadingModes));
+    } else {
+        static const uint8_t availableShadingModes[] = {
+            ANDROID_SHADING_MODE_OFF
+        };
+        ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
+                sizeof(availableShadingModes));
+    }
+
+    /* android.request */
+
+    static const int32_t maxNumOutputStreams[] = {
+        kMaxRawStreamCount, kMaxProcessedStreamCount, kMaxJpegStreamCount
+    };
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+            maxNumOutputStreams, 3);
+
+    static const uint8_t maxPipelineDepth = kMaxBufferCount;
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_PIPELINE_MAX_DEPTH, &maxPipelineDepth, 1);
+
+    static const int32_t partialResultCount = 1;
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+            &partialResultCount, /* count */ 1);
+
+    SortedVector<uint8_t> caps;
+    for (size_t i = 0; i < mCapabilities.size(); ++i) {
+        switch (mCapabilities[i]) {
+            case BACKWARD_COMPATIBLE:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+                break;
+            case PRIVATE_REPROCESSING:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING);
+                break;
+            case READ_SENSOR_SETTINGS:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
+                break;
+            case BURST_CAPTURE:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
+                break;
+            case YUV_REPROCESSING:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING);
+                break;
+            case CONSTRAINED_HIGH_SPEED_VIDEO:
+                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO);
+                break;
+            default:
+                // Ignore LEVELs.
+                break;
+        }
+    }
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, caps.array(), caps.size());
+
+    // Scan a default request template for included request keys.
+    Vector<int32_t> availableRequestKeys;
+    const camera_metadata_t *previewRequest =
+        constructDefaultRequestSettings(CAMERA3_TEMPLATE_PREVIEW);
+    for (size_t i = 0; i < get_camera_metadata_entry_count(previewRequest); ++i) {
+        camera_metadata_ro_entry_t entry;
+        get_camera_metadata_ro_entry(previewRequest, i, &entry);
+        availableRequestKeys.add(entry.tag);
+    }
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, availableRequestKeys.array(),
+            availableRequestKeys.size());
+
+    /*
+     * Add a few more result keys. Must be kept up to date with the various
+     * places that add these.
+     */
+
+    Vector<int32_t> availableResultKeys(availableRequestKeys);
+    if (hasCapability(BACKWARD_COMPATIBLE)) {
+        availableResultKeys.add(ANDROID_CONTROL_AE_STATE);
+        availableResultKeys.add(ANDROID_CONTROL_AF_STATE);
+        availableResultKeys.add(ANDROID_CONTROL_AWB_STATE);
+        availableResultKeys.add(ANDROID_FLASH_STATE);
+        availableResultKeys.add(ANDROID_LENS_STATE);
+        availableResultKeys.add(ANDROID_LENS_FOCUS_RANGE);
+        availableResultKeys.add(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW);
+        availableResultKeys.add(ANDROID_STATISTICS_SCENE_FLICKER);
+    }
+
+    availableResultKeys.add(ANDROID_REQUEST_PIPELINE_DEPTH);
+    availableResultKeys.add(ANDROID_SENSOR_TIMESTAMP);
+
+    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, availableResultKeys.array(),
+            availableResultKeys.size());
+
+    // Needs to be last, to collect all the keys set.
+
+    availableCharacteristicsKeys.add(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+    info.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+            availableCharacteristicsKeys);
+
+    mCameraInfo = info.release();
+
+#undef ADD_STATIC_ENTRY
+    return OK;
+}
+
+// Called by ReadoutThread once its in-flight queue drains; moves the device
+// from STATUS_ACTIVE back to STATUS_READY under the HAL serialization lock.
+void EmulatedQemuCamera3::signalReadoutIdle() {
+    Mutex::Autolock l(mLock);
+    /*
+     * Need to check isIdle again because waiting on mLock may have allowed
+     * something to be placed in the in-flight queue.
+     */
+    if (mStatus == STATUS_ACTIVE && mReadoutThread->isIdle()) {
+        ALOGV("Now idle");
+        mStatus = STATUS_READY;
+    }
+}
+
+/*
+ * QemuSensorListener callback: translates sensor events into HAL3 framework
+ * notifications. EXPOSURE_START becomes a CAMERA3_MSG_SHUTTER notify; any
+ * other event is unexpected and only logged.
+ */
+void EmulatedQemuCamera3::onQemuSensorEvent(uint32_t frameNumber, Event e,
+                                            nsecs_t timestamp) {
+    switch (e) {
+        case QemuSensor::QemuSensorListener::EXPOSURE_START:
+            // nsecs_t is int64_t, so use PRId64 (as the default branch does);
+            // %lld is the wrong specifier on LP64 targets.
+            ALOGVV("%s: Frame %d: Sensor started exposure at %" PRId64,
+                    __FUNCTION__, frameNumber, timestamp);
+            // Trigger shutter notify to framework.
+            camera3_notify_msg_t msg;
+            msg.type = CAMERA3_MSG_SHUTTER;
+            msg.message.shutter.frame_number = frameNumber;
+            msg.message.shutter.timestamp = timestamp;
+            sendNotify(&msg);
+            break;
+        default:
+            ALOGW("%s: Unexpected sensor event %d at %" PRId64, __FUNCTION__,
+                    e, timestamp);
+            break;
+    }
+}
+
+/*
+ * Construct the readout thread for its parent camera. Initialize
+ * mThreadActive explicitly: isIdle() reads it (under mLock) and may be called
+ * before threadLoop() has ever run, so leaving it uninitialized is an
+ * indeterminate read.
+ */
+EmulatedQemuCamera3::ReadoutThread::ReadoutThread(EmulatedQemuCamera3 *parent) :
+        mParent(parent), mThreadActive(false), mJpegWaiting(false) {
+    ALOGV("%s: Creating readout thread", __FUNCTION__);
+}
+
+// Free any requests still queued at destruction time; each queued Request
+// owns its heap-allocated buffer vectors.
+EmulatedQemuCamera3::ReadoutThread::~ReadoutThread() {
+    for (List<Request>::iterator i = mInFlightQueue.begin();
+         i != mInFlightQueue.end(); ++i) {
+        delete i->buffers;
+        delete i->sensorBuffers;
+    }
+}
+
+// Enqueue a request for readout once the sensor captures its frame, and wake
+// threadLoop, which may be blocked waiting for work.
+void EmulatedQemuCamera3::ReadoutThread::queueCaptureRequest(const Request &r) {
+    Mutex::Autolock l(mLock);
+
+    mInFlightQueue.push_back(r);
+    mInFlightSignal.signal();
+}
+
+// Idle means nothing queued AND threadLoop is not mid-readout
+// (mThreadActive is cleared by threadLoop when the queue empties).
+bool EmulatedQemuCamera3::ReadoutThread::isIdle() {
+    Mutex::Autolock l(mLock);
+    return mInFlightQueue.empty() && !mThreadActive;
+}
+
+/*
+ * Block until the in-flight queue has room (fewer than kMaxQueueSize
+ * entries). Polls in kWaitPerLoop slices; gives up with TIMED_OUT after
+ * kMaxWaitLoops slices. Used by processCaptureRequest to throttle submission.
+ */
+status_t EmulatedQemuCamera3::ReadoutThread::waitForReadout() {
+    status_t res;
+    Mutex::Autolock l(mLock);
+    int loopCount = 0;
+    while (mInFlightQueue.size() >= kMaxQueueSize) {
+        // mInFlightSignal is also signaled by threadLoop when it dequeues.
+        res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+        if (res != OK && res != TIMED_OUT) {
+            ALOGE("%s: Error waiting for in-flight queue to shrink",
+                    __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+        if (loopCount == kMaxWaitLoops) {
+            ALOGE("%s: Timed out waiting for in-flight queue to shrink",
+                    __FUNCTION__);
+            return TIMED_OUT;
+        }
+        loopCount++;
+    }
+    return OK;
+}
+
+/*
+ * Main readout loop: dequeue one in-flight request, wait for the sensor to
+ * deliver its frame, hand any JPEG buffer to the async compressor, fill in
+ * per-frame result metadata, and send the capture result to the framework.
+ * Returning true keeps the thread running; false terminates it.
+ */
+bool EmulatedQemuCamera3::ReadoutThread::threadLoop() {
+    status_t res;
+
+    ALOGVV("%s: ReadoutThread waiting for request", __FUNCTION__);
+
+    // First wait for a request from the in-flight queue.
+
+    if (mCurrentRequest.settings.isEmpty()) {
+        Mutex::Autolock l(mLock);
+        if (mInFlightQueue.empty()) {
+            res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+            if (res == TIMED_OUT) {
+                ALOGVV("%s: ReadoutThread: Timed out waiting for request",
+                        __FUNCTION__);
+                return true;
+            } else if (res != NO_ERROR) {
+                ALOGE("%s: Error waiting for capture requests: %d",
+                        __FUNCTION__, res);
+                return false;
+            }
+        }
+        // NOTE(review): waitRelative returning NO_ERROR does not by itself
+        // guarantee the queue is non-empty (spurious wakeups); verify the
+        // signaler always enqueues before signaling.
+        mCurrentRequest.frameNumber = mInFlightQueue.begin()->frameNumber;
+        mCurrentRequest.settings.acquire(mInFlightQueue.begin()->settings);
+        mCurrentRequest.buffers = mInFlightQueue.begin()->buffers;
+        mCurrentRequest.sensorBuffers = mInFlightQueue.begin()->sensorBuffers;
+        mInFlightQueue.erase(mInFlightQueue.begin());
+        // Wake waitForReadout(), which throttles on queue size.
+        mInFlightSignal.signal();
+        mThreadActive = true;
+        ALOGVV("%s: Beginning readout of frame %d", __FUNCTION__,
+                mCurrentRequest.frameNumber);
+    }
+
+    // Then wait for it to be delivered from the sensor.
+    ALOGVV("%s: ReadoutThread: Wait for frame to be delivered from sensor",
+            __FUNCTION__);
+
+    nsecs_t captureTime;
+    bool gotFrame =
+            mParent->mSensor->waitForNewFrame(kWaitPerLoop, &captureTime);
+    if (!gotFrame) {
+        // Keep mCurrentRequest; the next threadLoop iteration retries.
+        ALOGVV("%s: ReadoutThread: Timed out waiting for sensor frame",
+                __FUNCTION__);
+        return true;
+    }
+
+    ALOGVV("Sensor done with readout for frame %d, captured at %lld ",
+            mCurrentRequest.frameNumber, captureTime);
+
+    /*
+     * Check if we need to JPEG encode a buffer, and send it for async
+     * compression if so. Otherwise prepare the buffer for return.
+     */
+    bool needJpeg = false;
+    HalBufferVector::iterator buf = mCurrentRequest.buffers->begin();
+    while (buf != mCurrentRequest.buffers->end()) {
+        bool goodBuffer = true;
+        if (buf->stream->format == HAL_PIXEL_FORMAT_BLOB &&
+                buf->stream->data_space != HAL_DATASPACE_DEPTH) {
+            Mutex::Autolock jl(mJpegLock);
+            if (mJpegWaiting) {
+                /*
+                 * This shouldn't happen, because processCaptureRequest should
+                 * be stalling until JPEG compressor is free.
+                 */
+                ALOGE("%s: Already processing a JPEG!", __FUNCTION__);
+                goodBuffer = false;
+            }
+            if (goodBuffer) {
+                // Compressor takes ownership of sensorBuffers here.
+                res = mParent->mJpegCompressor->start(mCurrentRequest.sensorBuffers,
+                        this);
+                goodBuffer = (res == OK);
+            }
+            if (goodBuffer) {
+                needJpeg = true;
+
+                // The JPEG buffer is returned later via onJpegDone, so remove
+                // it from this result's buffer list.
+                mJpegHalBuffer = *buf;
+                mJpegFrameNumber = mCurrentRequest.frameNumber;
+                mJpegWaiting = true;
+
+                mCurrentRequest.sensorBuffers = nullptr;
+                buf = mCurrentRequest.buffers->erase(buf);
+
+                continue;
+            }
+            ALOGE("%s: Error compressing output buffer: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            // Fallthrough for cleanup.
+        }
+        GrallocModule::getInstance().unlock(*(buf->buffer));
+
+        buf->status = goodBuffer ? CAMERA3_BUFFER_STATUS_OK :
+                CAMERA3_BUFFER_STATUS_ERROR;
+        buf->acquire_fence = -1;
+        buf->release_fence = -1;
+
+        ++buf;
+    }
+
+    // Construct result for all completed buffers and results.
+
+    camera3_capture_result result;
+
+    if (mParent->hasCapability(BACKWARD_COMPATIBLE)) {
+        // Fixed result values advertised in availableResultKeys.
+        static const uint8_t sceneFlicker =
+                ANDROID_STATISTICS_SCENE_FLICKER_NONE;
+        mCurrentRequest.settings.update(ANDROID_STATISTICS_SCENE_FLICKER,
+                &sceneFlicker, 1);
+
+        static const uint8_t flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
+        mCurrentRequest.settings.update(ANDROID_FLASH_STATE,
+                &flashState, 1);
+
+        nsecs_t rollingShutterSkew = QemuSensor::kFrameDurationRange[0];
+        mCurrentRequest.settings.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
+                &rollingShutterSkew, 1);
+
+        float focusRange[] = { 1.0f / 5.0f, 0 };  // 5 m to infinity in focus
+        mCurrentRequest.settings.update(ANDROID_LENS_FOCUS_RANGE, focusRange,
+                sizeof(focusRange) / sizeof(float));
+    }
+
+    mCurrentRequest.settings.update(ANDROID_SENSOR_TIMESTAMP,
+            &captureTime, 1);
+
+
+    // JPEGs take a stage longer.
+    const uint8_t pipelineDepth = needJpeg ? kMaxBufferCount : kMaxBufferCount - 1;
+    mCurrentRequest.settings.update(ANDROID_REQUEST_PIPELINE_DEPTH,
+            &pipelineDepth, 1);
+
+    result.frame_number = mCurrentRequest.frameNumber;
+    result.result = mCurrentRequest.settings.getAndLock();
+    result.num_output_buffers = mCurrentRequest.buffers->size();
+    result.output_buffers = mCurrentRequest.buffers->array();
+    result.input_buffer = nullptr;
+    result.partial_result = 1;
+
+    // Go idle if queue is empty, before sending result.
+    bool signalIdle = false;
+    {
+        Mutex::Autolock l(mLock);
+        if (mInFlightQueue.empty()) {
+            mThreadActive = false;
+            signalIdle = true;
+        }
+    }
+    // signalReadoutIdle takes the parent's mLock, so call it outside ours.
+    if (signalIdle) mParent->signalReadoutIdle();
+
+    // Send it off to the framework.
+    ALOGVV("%s: ReadoutThread: Send result to framework",
+            __FUNCTION__);
+    mParent->sendCaptureResult(&result);
+
+    // Clean up.
+    mCurrentRequest.settings.unlock(result.result);
+
+    delete mCurrentRequest.buffers;
+    mCurrentRequest.buffers = nullptr;
+    if (!needJpeg) {
+        // If a JPEG is pending, the compressor owns sensorBuffers.
+        delete mCurrentRequest.sensorBuffers;
+        mCurrentRequest.sensorBuffers = nullptr;
+    }
+    // Empty settings mark "no current request" for the next iteration.
+    mCurrentRequest.settings.clear();
+
+    return true;
+}
+
+/*
+ * JpegCompressor completion callback: return the deferred JPEG output buffer
+ * to the framework as a buffers-only capture result (no metadata,
+ * partial_result 0).
+ */
+void EmulatedQemuCamera3::ReadoutThread::onJpegDone(
+        const StreamBuffer &jpegBuffer, bool success) {
+    Mutex::Autolock jl(mJpegLock);
+
+    GrallocModule::getInstance().unlock(*(jpegBuffer.buffer));
+
+    mJpegHalBuffer.status = success ?
+            CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR;
+    mJpegHalBuffer.acquire_fence = -1;
+    mJpegHalBuffer.release_fence = -1;
+    // Allows processCaptureRequest / threadLoop to start another JPEG.
+    mJpegWaiting = false;
+
+    camera3_capture_result result;
+
+    result.frame_number = mJpegFrameNumber;
+    result.result = nullptr;
+    result.num_output_buffers = 1;
+    result.output_buffers = &mJpegHalBuffer;
+    result.input_buffer = nullptr;
+    result.partial_result = 0;
+
+    if (!success) {
+        ALOGE("%s: Compression failure, returning error state buffer to"
+                " framework", __FUNCTION__);
+    } else {
+        ALOGV("%s: Compression complete, returning buffer to framework",
+                __FUNCTION__);
+    }
+
+    mParent->sendCaptureResult(&result);
+}
+
+// JpegCompressor input-buffer callback; unused in this HAL.
+void EmulatedQemuCamera3::ReadoutThread::onJpegInputDone(
+        const StreamBuffer &inputBuffer) {
+    /*
+     * Should never get here, since the input buffer has to be returned by end
+     * of processCaptureRequest.
+     */
+    ALOGE("%s: Unexpected input buffer from JPEG compressor!", __FUNCTION__);
+}
+
+}; // end of namespace android
diff --git a/camera/EmulatedQemuCamera3.h b/camera/EmulatedQemuCamera3.h
new file mode 100644
index 0000000..7022b40
--- /dev/null
+++ b/camera/EmulatedQemuCamera3.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA3_H
+#define HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA3_H
+
+/*
+ * Contains declaration of a class EmulatedQemuCamera3 that encapsulates
+ * functionality of a video capture device that implements version 3 of the
+ * camera device interface.
+ */
+
+#include "EmulatedCamera3.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include "qemu-pipeline3/QemuSensor.h"
+
+#include <camera/CameraMetadata.h>
+#include <utils/SortedVector.h>
+#include <utils/List.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+/*
+ * Encapsulates functionality for a v3 HAL camera which interfaces with a video
+ * capture device on the host computer.
+ *
+ * NOTE: Currently, resolutions larger than 640x480 are susceptible to
+ * performance problems.
+ *
+ * TODO: Optimize to allow regular usage of higher resolutions.
+ */
+class EmulatedQemuCamera3 : public EmulatedCamera3,
+        private QemuSensor::QemuSensorListener {
+public:
+    EmulatedQemuCamera3(int cameraId, struct hw_module_t* module);
+    virtual ~EmulatedQemuCamera3();
+
+    /*
+     * Args:
+     *     deviceName: File path where the capture device can be found (e.g.,
+     *                 "/dev/video0").
+     *     frameDims: Space-delimited resolutions with each dimension delimited
+     *                by a comma (e.g., "640,480 320,240").
+     *     facingDir: Contains either "front" or "back".
+     */
+    virtual status_t Initialize(const char *deviceName,
+                                const char *frameDims,
+                                const char *facingDir);
+
+    /**************************************************************************
+     * Camera Module API and Generic Hardware Device API Implementation
+     *************************************************************************/
+    virtual status_t connectCamera(hw_device_t **device);
+    virtual status_t closeCamera();
+    virtual status_t getCameraInfo(struct camera_info *info);
+
+protected:
+    /**************************************************************************
+     * EmulatedCamera3 Abstract API Implementation
+     *************************************************************************/
+    virtual status_t configureStreams(camera3_stream_configuration *streamList);
+    virtual status_t registerStreamBuffers(
+            const camera3_stream_buffer_set *bufferSet);
+    virtual const camera_metadata_t* constructDefaultRequestSettings(int type);
+    virtual status_t processCaptureRequest(camera3_capture_request *request);
+    virtual status_t flush();
+
+private:
+    /*
+     * Get the requested capability set (from boot properties) for this camera
+     * and populate "mCapabilities".
+     */
+    status_t getCameraCapabilities();
+
+    /*
+     * Extracts supported resolutions into "mResolutions".
+     *
+     * Args:
+     *     frameDims: A string of space-delimited resolutions with each
+     *                dimension delimited by a comma (e.g., "640,480 320,240").
+     */
+    void parseResolutions(const char *frameDims);
+
+    bool hasCapability(AvailableCapabilities cap);
+
+    /*
+     * Build the static info metadata buffer for this device.
+     */
+    status_t constructStaticInfo();
+
+    /*
+     * Signal from readout thread that it doesn't have anything to do.
+     */
+    void signalReadoutIdle();
+
+    /*
+     * Handle interrupt events from the sensor.
+     */
+    void onQemuSensorEvent(uint32_t frameNumber, Event e, nsecs_t timestamp);
+
+private:
+    /**************************************************************************
+     * Static Configuration Information
+     *************************************************************************/
+    static const uint32_t kMaxRawStreamCount = 0;
+    static const uint32_t kMaxProcessedStreamCount = 3;
+    static const uint32_t kMaxJpegStreamCount = 1;
+    static const uint32_t kMaxReprocessStreamCount = 0;
+    static const uint32_t kMaxBufferCount = 3;
+    // We need a positive stream ID to distinguish external buffers from
+    // sensor-generated buffers which use a nonpositive ID. Otherwise, HAL3 has
+    // no concept of a stream id.
+    static const uint32_t kGenericStreamId = 1;
+    static const int32_t kAvailableFormats[];
+    static const int64_t kSyncWaitTimeout = 10000000;  // 10 ms
+    static const int32_t kMaxSyncTimeoutCount = 1000;  // 1000 kSyncWaitTimeouts
+    static const uint32_t kFenceTimeoutMs = 2000;  // 2 s
+    static const nsecs_t kJpegTimeoutNs = 5000000000l;  // 5 s
+
+    /**************************************************************************
+     * Data Members
+     *************************************************************************/
+
+    // HAL interface serialization lock.
+    Mutex mLock;
+
+    // Host-side capture device name, as passed to Initialize().
+    const char *mDeviceName;
+    bool mFacingBack;
+    uint32_t mSensorWidth;
+    uint32_t mSensorHeight;
+    // (width, height) pairs parsed from the frameDims string.
+    std::vector<std::pair<int32_t,int32_t>> mResolutions;
+
+
+    SortedVector<AvailableCapabilities> mCapabilities;
+
+    /*
+     * Cache for default templates. Once one is requested, the pointer must be
+     * valid at least until close() is called on the device.
+     */
+    camera_metadata_t *mDefaultTemplates[CAMERA3_TEMPLATE_COUNT];
+
+    // Private stream information, stored in camera3_stream_t->priv.
+    struct PrivateStreamInfo {
+        bool alive;
+    };
+
+    // Shortcut to the input stream.
+    camera3_stream_t* mInputStream;
+
+    typedef List<camera3_stream_t*> StreamList;
+    typedef List<camera3_stream_t*>::iterator StreamIterator;
+    typedef Vector<camera3_stream_buffer> HalBufferVector;
+
+    // All streams, including input stream.
+    StreamList mStreams;
+
+    // Cached settings from latest submitted request.
+    CameraMetadata mPrevSettings;
+
+    // Fake Hardware Interfaces
+    sp<QemuSensor> mSensor;
+    sp<JpegCompressor> mJpegCompressor;
+    friend class JpegCompressor;
+
+    /*
+     * Processing thread for sending out results.
+     */
+    class ReadoutThread : public Thread, private JpegCompressor::JpegListener {
+      public:
+        ReadoutThread(EmulatedQemuCamera3 *parent);
+        ~ReadoutThread();
+
+        struct Request {
+            uint32_t frameNumber;
+            CameraMetadata settings;
+            HalBufferVector *buffers;
+            Buffers *sensorBuffers;
+        };
+
+        /*
+         * Interface to Parent Class
+         */
+
+        /*
+         * Place request in the in-flight queue to wait for sensor capture.
+         */
+        void queueCaptureRequest(const Request &r);
+
+        /*
+         * Test if the readout thread is idle (no in-flight requests, not
+         * currently reading out anything).
+         */
+        bool isIdle();
+
+        /*
+         * Wait until isIdle is true.
+         */
+        status_t waitForReadout();
+
+      private:
+        static const nsecs_t kWaitPerLoop = 10000000L;  // 10 ms
+        static const nsecs_t kMaxWaitLoops = 1000;
+        static const size_t kMaxQueueSize = 2;
+
+        EmulatedQemuCamera3 *mParent;
+        // Guards mInFlightQueue, mInFlightSignal, and mThreadActive.
+        Mutex mLock;
+
+        List<Request> mInFlightQueue;
+        Condition mInFlightSignal;
+        bool mThreadActive;
+
+        virtual bool threadLoop();
+
+        // Only accessed by threadLoop.
+        Request mCurrentRequest;
+
+        // Guards the mJpeg* state shared with JpegCompressor callbacks.
+        Mutex mJpegLock;
+        bool mJpegWaiting;
+        camera3_stream_buffer mJpegHalBuffer;
+        uint32_t mJpegFrameNumber;
+
+        /*
+         * Jpeg Completion Callbacks
+         */
+        virtual void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+        virtual void onJpegInputDone(const StreamBuffer &inputBuffer);
+    };
+
+    sp<ReadoutThread> mReadoutThread;
+};
+
+}; // end of namespace android
+
+#endif // HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA3_H
diff --git a/camera/GrallocModule.h b/camera/GrallocModule.h
index 1f5a8f2..72b6322 100644
--- a/camera/GrallocModule.h
+++ b/camera/GrallocModule.h
@@ -2,6 +2,7 @@
 #define EMU_CAMERA_GRALLOC_MODULE_H
 
 #include <hardware/gralloc.h>
+#include <utils/Log.h>
 
 class GrallocModule
 {
diff --git a/camera/qemu-pipeline3/QemuSensor.cpp b/camera/qemu-pipeline3/QemuSensor.cpp
new file mode 100644
index 0000000..bf1ef26
--- /dev/null
+++ b/camera/qemu-pipeline3/QemuSensor.cpp
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Uncomment LOG_NDEBUG to enable verbose logging, and uncomment both LOG_NDEBUG
+// *and* LOG_NNDEBUG to enable very verbose logging.
+
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#define LOG_TAG "EmulatedCamera3_QemuSensor"
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include "qemu-pipeline3/QemuSensor.h"
+#include "system/camera_metadata.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <linux/videodev2.h>
+#include <utils/Log.h>
+
+namespace android {
+
+// Out-of-line definitions for the static sensor characteristics declared in
+// qemu-pipeline3/QemuSensor.h.
+const nsecs_t QemuSensor::kExposureTimeRange[2] =
+        {1000L, 300000000L};  // 1 us - 0.3 sec
+const nsecs_t QemuSensor::kFrameDurationRange[2] =
+        {33331760L, 300000000L};  // ~1/30 s - 0.3 sec
+const nsecs_t QemuSensor::kMinVerticalBlank = 10000L;
+
+const int32_t QemuSensor::kSensitivityRange[2] = {100, 1600};
+const uint32_t QemuSensor::kDefaultSensitivity = 100;
+
+// Construct a sensor bound to the named host capture device with a fixed
+// pixel-array size. No host communication happens until startUp().
+QemuSensor::QemuSensor(const char *deviceName, uint32_t width, uint32_t height):
+        Thread(false),
+        mWidth(width),
+        mHeight(height),
+        mActiveArray{0, 0, width, height},
+        mLastRequestWidth(-1),
+        mLastRequestHeight(-1),
+        mCameraQemuClient(),
+        mGotVSync(false),
+        mDeviceName(deviceName),
+        mFrameDuration(kFrameDurationRange[0]),
+        mNextBuffers(nullptr),
+        mFrameNumber(0),
+        mCapturedBuffers(nullptr),
+        mListener(nullptr) {
+    ALOGV("QemuSensor created with pixel array %d x %d", width, height);
+}
+
+// Stop the capture thread and the host-side device on destruction.
+QemuSensor::~QemuSensor() {
+    shutDown();
+}
+
+/*
+ * Start the sensor: launch the capture thread, then connect to the emulator's
+ * camera service for mDeviceName. Returns OK on success, or the first error
+ * encountered.
+ */
+status_t QemuSensor::startUp() {
+    ALOGV("%s: Entered", __FUNCTION__);
+
+    mCapturedBuffers = nullptr;
+    status_t res = run("EmulatedQemuCamera3::QemuSensor",
+            ANDROID_PRIORITY_URGENT_DISPLAY);
+    if (res != OK) {
+        ALOGE("Unable to start up sensor capture thread: %d", res);
+        /*
+         * Without the capture thread there is nothing to drive readout;
+         * propagate the failure instead of connecting anyway and possibly
+         * returning OK from the connection path below.
+         */
+        return res;
+    }
+
+    char connect_str[256];
+    snprintf(connect_str, sizeof(connect_str), "name=%s", mDeviceName);
+    res = mCameraQemuClient.connectClient(connect_str);
+    if (res != NO_ERROR) {
+        return res;
+    }
+
+    res = mCameraQemuClient.queryConnect();
+    if (res == NO_ERROR) {
+        ALOGV("%s: Connected to device '%s'",
+                __FUNCTION__, (const char*) mDeviceName);
+        mState = ECDS_CONNECTED;
+    } else {
+        ALOGE("%s: Connection to device '%s' failed",
+                __FUNCTION__, (const char*) mDeviceName);
+    }
+
+    return res;
+}
+
+/*
+ * Stop the capture thread and tell the host-side device to stop streaming.
+ * Returns the status of the device stop query.
+ */
+status_t QemuSensor::shutDown() {
+    ALOGV("%s: Entered", __FUNCTION__);
+
+    status_t res = requestExitAndWait();
+    if (res != OK) {
+        ALOGE("Unable to shut down sensor capture thread: %d", res);
+    }
+
+    /* Stop the actual camera device. */
+    res = mCameraQemuClient.queryStop();
+    if (res == NO_ERROR) {
+        // Device is stopped but the client connection remains.
+        mState = ECDS_CONNECTED;
+        ALOGV("%s: Qemu camera device '%s' is stopped",
+                __FUNCTION__, (const char*) mDeviceName);
+    } else {
+        ALOGE("%s: Unable to stop device '%s'",
+                __FUNCTION__, (const char*) mDeviceName);
+    }
+
+    return res;
+}
+
+// Set the simulated frame duration (nanoseconds) for subsequent captures.
+void QemuSensor::setFrameDuration(uint64_t ns) {
+    Mutex::Autolock lock(mControlMutex);
+    ALOGVV("Frame duration set to %f", ns/1000000.f);
+    mFrameDuration = ns;
+}
+
+// Hand the sensor the buffer set to fill on the next capture; consumed
+// (set back to null) by threadLoop.
+void QemuSensor::setDestinationBuffers(Buffers *buffers) {
+    Mutex::Autolock lock(mControlMutex);
+    mNextBuffers = buffers;
+}
+
+// Record the framework frame number to report with the next capture's events.
+void QemuSensor::setFrameNumber(uint32_t frameNumber) {
+    Mutex::Autolock lock(mControlMutex);
+    mFrameNumber = frameNumber;
+}
+
+/*
+ * Block up to "reltime" ns for the sensor thread's VSync signal. Returns true
+ * only if a VSync actually fired (mGotVSync is set by threadLoop); a timeout
+ * or error returns false.
+ */
+bool QemuSensor::waitForVSync(nsecs_t reltime) {
+    int res;
+    Mutex::Autolock lock(mControlMutex);
+
+    mGotVSync = false;
+    res = mVSync.waitRelative(mControlMutex, reltime);
+    if (res != OK && res != TIMED_OUT) {
+        ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
+        return false;
+    }
+    return mGotVSync;
+}
+
+/*
+ * Block up to "reltime" ns for the sensor thread to finish reading out a
+ * frame. On success, stores the frame's capture timestamp in *captureTime,
+ * consumes the captured-buffers handoff, and wakes the sensor thread if it
+ * was waiting in mReadoutComplete. Returns false on timeout or error.
+ */
+bool QemuSensor::waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime) {
+    Mutex::Autolock lock(mReadoutMutex);
+    // (Removed unused local "uint8_t *ret".)
+    if (mCapturedBuffers == nullptr) {
+        int res;
+        res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
+        if (res == TIMED_OUT) {
+            return false;
+        } else if (res != OK || mCapturedBuffers == nullptr) {
+            ALOGE("Error waiting for sensor readout signal: %d", res);
+            return false;
+        }
+    }
+    mReadoutComplete.signal();
+
+    *captureTime = mCaptureTime;
+    mCapturedBuffers = nullptr;
+    return true;
+}
+
+// Out-of-line destructor anchors the listener interface's vtable.
+QemuSensor::QemuSensorListener::~QemuSensorListener() {
+}
+
+// Register the listener notified of sensor events (e.g. EXPOSURE_START).
+// Caller retains ownership; pass nullptr to clear.
+void QemuSensor::setQemuSensorListener(QemuSensorListener *listener) {
+    Mutex::Autolock lock(mControlMutex);
+    mListener = listener;
+}
+
+// Thread hook run once before the first threadLoop: reset per-run capture
+// state.
+status_t QemuSensor::readyToRun() {
+    ALOGV("Starting up sensor thread");
+    mStartupTime = systemTime();
+    mNextCaptureTime = 0;
+    mNextCapturedBuffers = nullptr;
+    return OK;
+}
+
+/*
+ * One simulated sensor cycle: snapshot control parameters and publish VSync
+ * (Stage 1), hand the frame captured on the previous iteration to the
+ * readout side (Stage 3), capture into the buffers queued for this frame
+ * (Stage 2), then sleep out the remainder of the frame duration. Returns
+ * true so the Thread framework keeps calling it until the thread exits.
+ */
+bool QemuSensor::threadLoop() {
+    /*
+     * Stages are out-of-order relative to a single frame's processing, but
+     * in-order in time.
+     */
+
+    /*
+     * Stage 1: Read in latest control parameters.
+     */
+    uint64_t frameDuration;
+    Buffers *nextBuffers;
+    uint32_t frameNumber;
+    QemuSensorListener *listener = nullptr;
+    {
+        // Lock while we're grabbing readout variables.
+        Mutex::Autolock lock(mControlMutex);
+        frameDuration = mFrameDuration;
+        nextBuffers = mNextBuffers;
+        frameNumber = mFrameNumber;
+        listener = mListener;
+        // Don't reuse a buffer set.
+        mNextBuffers = nullptr;
+
+        // Signal VSync for start of readout.
+        ALOGVV("QemuSensor VSync");
+        mGotVSync = true;
+        mVSync.signal();
+    }
+
+    /*
+     * Stage 3: Read out latest captured image.
+     */
+
+    Buffers *capturedBuffers = nullptr;
+    nsecs_t captureTime = 0;
+
+    nsecs_t startRealTime = systemTime();
+    /*
+     * Stagefright cares about system time for timestamps, so base simulated
+     * time on that.
+     */
+    nsecs_t simulatedTime = startRealTime;
+    nsecs_t frameEndRealTime = startRealTime + frameDuration;
+
+    if (mNextCapturedBuffers != nullptr) {
+        ALOGVV("QemuSensor starting readout");
+        /*
+         * Pretend we're doing readout now; will signal once enough time has
+         * elapsed.
+         */
+        capturedBuffers = mNextCapturedBuffers;
+        captureTime = mNextCaptureTime;
+    }
+
+    /*
+     * TODO: Move this signal to another thread to simulate readout time
+     * properly.
+     */
+    if (capturedBuffers != nullptr) {
+        ALOGVV("QemuSensor readout complete");
+        Mutex::Autolock lock(mReadoutMutex);
+        // Back-pressure: if the consumer hasn't drained the previous frame,
+        // block until it signals mReadoutComplete before overwriting it.
+        if (mCapturedBuffers != nullptr) {
+            ALOGV("Waiting for readout thread to catch up!");
+            mReadoutComplete.wait(mReadoutMutex);
+        }
+
+        mCapturedBuffers = capturedBuffers;
+        mCaptureTime = captureTime;
+        mReadoutAvailable.signal();
+        capturedBuffers = nullptr;
+    }
+
+    /*
+     * Stage 2: Capture new image.
+     */
+    mNextCaptureTime = simulatedTime;
+    mNextCapturedBuffers = nextBuffers;
+
+    if (mNextCapturedBuffers != nullptr) {
+        if (listener != nullptr) {
+            listener->onQemuSensorEvent(frameNumber, QemuSensorListener::EXPOSURE_START,
+                                        mNextCaptureTime);
+        }
+
+        // Might be adding more buffers, so size isn't constant.
+        for (size_t i = 0; i < mNextCapturedBuffers->size(); ++i) {
+            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
+            // NOTE(review): "%d" with a size_t `i` relies on platform int
+            // width; "%zu" would be the portable specifier.
+            ALOGVV("QemuSensor capturing buffer %d: stream %d,"
+                    " %d x %d, format %x, stride %d, buf %p, img %p",
+                    i, b.streamId, b.width, b.height, b.format, b.stride,
+                    b.buffer, b.img);
+            switch (b.format) {
+                case HAL_PIXEL_FORMAT_RGB_888:
+                    captureRGB(b.img, b.width, b.height, b.stride);
+                    break;
+                case HAL_PIXEL_FORMAT_RGBA_8888:
+                    captureRGBA(b.img, b.width, b.height, b.stride);
+                    break;
+                case HAL_PIXEL_FORMAT_BLOB:
+                    if (b.dataSpace == HAL_DATASPACE_DEPTH) {
+                        ALOGE("%s: Depth clouds unsupported", __FUNCTION__);
+                    } else {
+                        /*
+                         * Add auxiliary buffer of the right size. Assumes only
+                         * one BLOB (JPEG) buffer is in mNextCapturedBuffers.
+                         */
+                        StreamBuffer bAux;
+                        bAux.streamId = 0;
+                        bAux.width = b.width;
+                        bAux.height = b.height;
+                        bAux.format = HAL_PIXEL_FORMAT_RGB_888;
+                        bAux.stride = b.width;
+                        bAux.buffer = nullptr;
+                        // TODO: Reuse these.
+                        // NOTE(review): bAux.img is new[]-allocated here and
+                        // not freed in this function — presumably the readout
+                        // consumer delete[]s it; confirm ownership.
+                        bAux.img = new uint8_t[b.width * b.height * 3];
+                        mNextCapturedBuffers->push_back(bAux);
+                    }
+                    break;
+                case HAL_PIXEL_FORMAT_YCbCr_420_888:
+                    captureNV21(b.img, b.width, b.height, b.stride);
+                    break;
+                default:
+                    ALOGE("%s: Unknown/unsupported format %x, no output",
+                            __FUNCTION__, b.format);
+                    break;
+            }
+        }
+    }
+
+    // Sleep until the nominal end of this frame to pace output at the
+    // requested frame duration (simulated vertical blanking).
+    ALOGVV("QemuSensor vertical blanking interval");
+    nsecs_t workDoneRealTime = systemTime();
+    const nsecs_t timeAccuracy = 2e6;  // 2 ms of imprecision is ok.
+    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
+        timespec t;
+        t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
+        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;
+
+        int ret;
+        do {
+            // nanosleep updates `t` with remaining time if interrupted, so
+            // retrying resumes where the sleep left off.
+            ret = nanosleep(&t, &t);
+        } while (ret != 0);
+    }
+    nsecs_t endRealTime = systemTime();
+    ALOGVV("Frame cycle took %d ms, target %d ms",
+            (int) ((endRealTime - startRealTime) / 1000000),
+            (int) (frameDuration / 1000000));
+    return true;
+};
+
+/*
+ * Fills `img` with an RGBA frame read from the QEMU camera service.
+ * Restarts the emulated device whenever the requested dimensions differ from
+ * the previous request, since the device streams at a fixed resolution.
+ *
+ * Args:
+ *     img: Destination buffer; must hold at least width * height * 4 bytes.
+ *     width/height: Requested frame dimensions.
+ *     stride: Row stride in pixels (currently unused — see note below).
+ */
+void QemuSensor::captureRGBA(uint8_t *img, uint32_t width, uint32_t height,
+        uint32_t stride) {
+    status_t res;
+    if (width != mLastRequestWidth || height != mLastRequestHeight) {
+        ALOGI("%s: Dimensions for the current request (%dx%d) differ from "
+                "the previous request (%dx%d). Restarting camera",
+                __FUNCTION__, width, height, mLastRequestWidth,
+                mLastRequestHeight);
+
+        if (mLastRequestWidth != -1 || mLastRequestHeight != -1) {
+            // We only need to stop the camera if this isn't the first request.
+
+            // Stop the camera device.
+            res = mCameraQemuClient.queryStop();
+            if (res == NO_ERROR) {
+                mState = ECDS_CONNECTED;
+                ALOGV("%s: Qemu camera device '%s' is stopped",
+                        __FUNCTION__, (const char*) mDeviceName);
+            } else {
+                ALOGE("%s: Unable to stop device '%s'",
+                        __FUNCTION__, (const char*) mDeviceName);
+            }
+        }
+
+        /*
+         * Pixel format doesn't matter if we're only using preview frames, since
+         * the camera service always converts them to V4L2_PIX_FMT_RGB32, so we
+         * use the pixel format below, because it causes errors if you request
+         * V4L2_PIX_FMT_RGB32 (should be fixed in the future).
+         */
+        uint32_t pixFmt = V4L2_PIX_FMT_YUV420;
+        res = mCameraQemuClient.queryStart(pixFmt, width, height);
+        if (res == NO_ERROR) {
+            mLastRequestWidth = width;
+            mLastRequestHeight = height;
+            // NOTE(review): these logs print mWidth/mHeight (the sensor's
+            // static array size) while the device was started with
+            // width/height — confirm which was intended.
+            ALOGV("%s: Qemu camera device '%s' is started for %.4s[%dx%d] frames",
+                    __FUNCTION__, (const char*) mDeviceName,
+                    reinterpret_cast<const char*>(&pixFmt),
+                    mWidth, mHeight);
+            mState = ECDS_STARTED;
+        } else {
+            ALOGE("%s: Unable to start device '%s' for %.4s[%dx%d] frames",
+                    __FUNCTION__, (const char*) mDeviceName,
+                    reinterpret_cast<const char*>(&pixFmt),
+                    mWidth, mHeight);
+            return;
+        }
+    }
+
+    // Since the format is V4L2_PIX_FMT_RGB32, we need 4 bytes per pixel.
+    // NOTE(review): this assumes stride == width (the `stride` parameter is
+    // never used); confirm callers never pass a padded stride.
+    size_t bufferSize = width * height * 4;
+    // Apply no white balance or exposure compensation.
+    float whiteBalance[] = {1.0f, 1.0f, 1.0f};
+    float exposureCompensation = 1.0f;
+    // Read from webcam.
+    mCameraQemuClient.queryFrame(nullptr, img, 0, bufferSize, whiteBalance[0],
+            whiteBalance[1], whiteBalance[2],
+            exposureCompensation);
+
+    ALOGVV("RGBA sensor image captured");
+}
+
+// Stub: RGB_888 capture is not supported yet; logs an error and produces no
+// output (see threadLoop's HAL_PIXEL_FORMAT_RGB_888 case).
+void QemuSensor::captureRGB(uint8_t *img, uint32_t width, uint32_t height, uint32_t stride) {
+    ALOGE("%s: Not implemented", __FUNCTION__);
+}
+
+// Stub: YCbCr_420_888 (NV21) capture is not supported yet; logs an error and
+// produces no output (see threadLoop's HAL_PIXEL_FORMAT_YCbCr_420_888 case).
+void QemuSensor::captureNV21(uint8_t *img, uint32_t width, uint32_t height, uint32_t stride) {
+    ALOGE("%s: Not implemented", __FUNCTION__);
+}
+
+}; // end of namespace android
diff --git a/camera/qemu-pipeline3/QemuSensor.h b/camera/qemu-pipeline3/QemuSensor.h
new file mode 100644
index 0000000..c67296d
--- /dev/null
+++ b/camera/qemu-pipeline3/QemuSensor.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This class is an abstraction to treat a capture device (e.g., a webcam)
+ * connected to the host computer as an image sensor.  The capture device must
+ * support both 360x240 and 640x480 resolutions.
+ *
+ * The characteristics of this sensor don't correspond to any actual sensor,
+ * but are not far off typical sensors.
+ */
+
+#ifndef HW_EMULATOR_CAMERA2_QEMU_SENSOR_H
+#define HW_EMULATOR_CAMERA2_QEMU_SENSOR_H
+
+#include "fake-pipeline2/Base.h"
+#include "QemuClient.h"
+
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class EmulatedFakeCamera2;
+
+// Thread is inherited privately so callers drive the sensor through
+// startUp()/shutDown() rather than the raw Thread API.
+class QemuSensor: private Thread, public virtual RefBase {
+  public:
+   /*
+    * Args:
+    *     deviceName: File path where the capture device can be found (e.g.,
+    *                 "/dev/video0").
+    *     width: Width of pixel array.
+    *     height: Height of pixel array.
+    */
+    QemuSensor(const char *deviceName, uint32_t width, uint32_t height);
+    ~QemuSensor();
+
+    /*
+     * Power Control
+     */
+
+    status_t startUp();
+    status_t shutDown();
+
+    /*
+     * Controls that can be updated every frame.
+     */
+
+    void setFrameDuration(uint64_t ns);
+
+    /*
+     * Each Buffer in "buffers" must be at least stride*height*2 bytes in size.
+     */
+    void setDestinationBuffers(Buffers *buffers);
+    /*
+     * To simplify tracking the sensor's current frame.
+     */
+    void setFrameNumber(uint32_t frameNumber);
+
+    /*
+     * Synchronizing with sensor operation (vertical sync).
+     */
+
+    /*
+     * Wait until the sensor outputs its next vertical sync signal, meaning it
+     * is starting readout of its latest frame of data.
+     *
+     * Returns:
+     *     true if vertical sync is signaled; false if the wait timed out.
+     */
+    bool waitForVSync(nsecs_t reltime);
+
+    /*
+     * Wait until a new frame has been read out, and then return the time
+     * capture started. May return immediately if a new frame has been pushed
+     * since the last wait for a new frame.
+     *
+     * Returns:
+     *     true if new frame is returned; false if timed out.
+     */
+    bool waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime);
+
+    /*
+     * Interrupt event servicing from the sensor. Only triggers for sensor
+     * cycles that have valid buffers to write to.
+     */
+    struct QemuSensorListener {
+        enum Event {
+            EXPOSURE_START,
+        };
+
+        virtual void onQemuSensorEvent(uint32_t frameNumber, Event e,
+                nsecs_t timestamp) = 0;
+        virtual ~QemuSensorListener();
+    };
+
+    void setQemuSensorListener(QemuSensorListener *listener);
+
+    /*
+     * Static Sensor Characteristics
+     */
+    const uint32_t mWidth, mHeight;
+    // NOTE(review): presumably {xmin, ymin, width, height} of the active
+    // pixel region — confirm against the constructor's initializer.
+    const uint32_t mActiveArray[4];
+
+    static const nsecs_t kExposureTimeRange[2];
+    static const nsecs_t kFrameDurationRange[2];
+    static const nsecs_t kMinVerticalBlank;
+
+    static const int32_t kSensitivityRange[2];
+    static const uint32_t kDefaultSensitivity;
+
+  private:
+    // Dimensions of the most recent capture request; captureRGBA() restarts
+    // the device when a request's dimensions differ from these. Presumably
+    // initialized to -1 ("no request yet") in the constructor — confirm.
+    int32_t mLastRequestWidth, mLastRequestHeight;
+
+    /*
+     * Defines possible states of the emulated camera device object.
+     */
+    enum EmulatedCameraDeviceState {
+        // Object has been constructed.
+        ECDS_CONSTRUCTED,
+        // Object has been initialized.
+        ECDS_INITIALIZED,
+        // Object has been connected to the physical device.
+        ECDS_CONNECTED,
+        // Camera device has been started.
+        ECDS_STARTED,
+    };
+    // Object state.
+    EmulatedCameraDeviceState mState;
+
+    CameraQemuClient mCameraQemuClient;
+    // Not owned; NOTE(review): stored as a raw pointer, so the string passed
+    // to the constructor must outlive this object — confirm callers.
+    const char *mDeviceName;
+
+    // Always lock before accessing control parameters.
+    Mutex mControlMutex;
+    /*
+     * Control Parameters
+     */
+    Condition mVSync;
+    bool mGotVSync;
+    uint64_t mFrameDuration;
+    Buffers *mNextBuffers;
+    uint32_t mFrameNumber;
+
+    // Always lock before accessing readout variables.
+    Mutex mReadoutMutex;
+    /*
+     * Readout Variables
+     */
+    Condition mReadoutAvailable;
+    Condition mReadoutComplete;
+    Buffers *mCapturedBuffers;
+    nsecs_t mCaptureTime;
+    QemuSensorListener *mListener;
+
+    // Time of sensor startup (used for simulation zero-time point).
+    nsecs_t mStartupTime;
+
+  private:
+    /*
+     * Inherited Thread Virtual Overrides
+     */
+    virtual status_t readyToRun();
+    /*
+     * QemuSensor capture operation main loop.
+     */
+    virtual bool threadLoop();
+
+    /*
+     * Members only used by the processing thread.
+     */
+    nsecs_t mNextCaptureTime;
+    Buffers *mNextCapturedBuffers;
+
+    void captureRGBA(uint8_t *img, uint32_t width, uint32_t height,
+            uint32_t stride);
+    void captureRGB(uint8_t *img, uint32_t width, uint32_t height,
+            uint32_t stride);
+    void captureNV21(uint8_t *img, uint32_t width, uint32_t height,
+            uint32_t stride);
+};
+
+}; // end of namespace android
+
+#endif // HW_EMULATOR_CAMERA2_QEMU_SENSOR_H